diff --git a/.github/workflows/root-634.yml b/.github/workflows/root-634.yml new file mode 100644 index 0000000000000..50cff9a24bd0e --- /dev/null +++ b/.github/workflows/root-634.yml @@ -0,0 +1,34 @@ + +name: 'ROOT 6.34' + +on: + schedule: + - cron: '01 1 * * *' + + workflow_dispatch: + inputs: + incremental: + description: 'Do incremental build' + type: boolean + required: true + default: true + binaries: + description: Create binary packages and upload them as artifacts + type: boolean + required: true + default: false + buildtype: + description: The CMAKE_BUILD_TYPE to use for non-Windows. + type: choice + options: + - Debug + - RelWithDebInfo + - Release + - MinSizeRel + default: Debug + required: true + +jobs: + run_nightlies: + uses: root-project/root/.github/workflows/root-ci.yml@v6-34-00-patches + secrets: inherit diff --git a/.github/workflows/root-ci-config/buildconfig/fedora41.txt b/.github/workflows/root-ci-config/buildconfig/fedora41.txt new file mode 100644 index 0000000000000..cb1da137b6b97 --- /dev/null +++ b/.github/workflows/root-ci-config/buildconfig/fedora41.txt @@ -0,0 +1,5 @@ +builtin_zstd=ON +builtin_zlib=ON +builtin_nlohmannjson=On +builtin_vdt=On +pythia8=Off diff --git a/.github/workflows/root-ci-config/buildconfig/ubuntu2410.txt b/.github/workflows/root-ci-config/buildconfig/ubuntu2410.txt new file mode 100644 index 0000000000000..58d56a8a0b9c1 --- /dev/null +++ b/.github/workflows/root-ci-config/buildconfig/ubuntu2410.txt @@ -0,0 +1,2 @@ +pythia8=OFF +tmva-cpu=OFF diff --git a/.github/workflows/root-ci.yml b/.github/workflows/root-ci.yml index 551a4dcd6b60a..3b75040d98a4e 100644 --- a/.github/workflows/root-ci.yml +++ b/.github/workflows/root-ci.yml @@ -20,13 +20,13 @@ on: inputs: head_ref: type: string - default: master + default: v6-34-00-patches base_ref: type: string - default: master + default: v6-34-00-patches ref_name: type: string - default: master + default: v6-34-00-patches # Enables manual start of workflow workflow_dispatch: @@ -153,6 +153,9 @@ jobs: with: build-directory: /Users/sftnight/ROOT-CI/src/ + - name: Set up curl CA bundle for Davix to work with https + run: 'echo SSL_CERT_FILE=/opt/local/share/curl/curl-ca-bundle.crt >> $GITHUB_ENV' + - name: Pull Request Build if: github.event_name == 'pull_request' env: @@ -358,6 +361,8 @@ jobs: include: - image: fedora40 overrides: ["LLVM_ENABLE_ASSERTIONS=On", "CMAKE_CXX_STANDARD=20"] + - image: fedora41 + overrides: ["LLVM_ENABLE_ASSERTIONS=On"] - image: alma8 overrides: ["LLVM_ENABLE_ASSERTIONS=On"] - image: alma9 @@ -368,6 +373,8 @@ jobs: overrides: ["imt=Off", "LLVM_ENABLE_ASSERTIONS=On", "CMAKE_BUILD_TYPE=Debug"] - image: ubuntu2404 overrides: ["LLVM_ENABLE_ASSERTIONS=On", "CMAKE_BUILD_TYPE=Debug"] + - image: ubuntu2410 + overrides: ["LLVM_ENABLE_ASSERTIONS=On", "CMAKE_BUILD_TYPE=Debug"] - image: debian125 overrides: ["LLVM_ENABLE_ASSERTIONS=On", "CMAKE_CXX_STANDARD=20"] # Special builds @@ -388,10 +395,11 @@ jobs: is_special: true property: clang overrides: ["LLVM_ENABLE_ASSERTIONS=On", "CMAKE_C_COMPILER=clang", "CMAKE_CXX_COMPILER=clang++"] - - image: ubuntu2404-cuda - is_special: true - property: gpu - extra-runs-on: gpu + # Disable until the DNS issues are understood + # - image: ubuntu2404-cuda + # is_special: true + # property: gpu + # extra-runs-on: gpu runs-on: - self-hosted diff --git a/.github/workflows/root-docs-634.yml b/.github/workflows/root-docs-634.yml new file mode 100644 index 0000000000000..ad30f934a1ded --- /dev/null +++ b/.github/workflows/root-docs-634.yml @@ -0,0 +1,25 @@ + 
+name: 'ROOT Docs 6.34' + +on: + schedule: + - cron: '0 1 * * *' + - cron: '0 12 * * *' + + workflow_dispatch: + inputs: + incremental: + description: 'Do incremental build' + type: boolean + required: true + default: true + # docu_input: # opportunity: overwrite makeinput.sh with these args + # description: Folders to build documentation for. All folders are built if empty. + # type: string + # default: "" + # required: false + +jobs: + run_nightlies: + uses: root-project/root/.github/workflows/root-docs-ci.yml@v6-34-00-patches + secrets: inherit diff --git a/.github/workflows/root-docs-ci.yml b/.github/workflows/root-docs-ci.yml new file mode 100644 index 0000000000000..4049b89c6ee78 --- /dev/null +++ b/.github/workflows/root-docs-ci.yml @@ -0,0 +1,181 @@ +name: 'ROOT Docs CI' + +on: + + # Allows nightly builds to trigger one run for each branch easily, by + # providing the relevant branch as "default" value here: + workflow_call: + inputs: + incremental: + type: boolean + default: true + + workflow_dispatch: + inputs: + incremental: + description: 'Do incremental build' + type: boolean + required: true + default: true + # docu_input: # opportunity: overwrite makeinput.sh with these args + # description: Folders to build documentation for. All folders are built if empty. + # type: string + # default: "" + # required: false + +jobs: + build-docs: + if: github.repository_owner == 'root-project' + + runs-on: + - self-hosted + - linux + - x64 + + env: + PLATFORM: alma9 + DOC_DIR: v6-34-00-patches + DOC_LOCATION: /github/home + BASE_REF: v6-34-00-patches + WEB_DIR_NAME: v634 + TAR_NAME: html634.tar + + permissions: + contents: read + + container: + image: registry.cern.ch/root-ci/alma9:buildready # ALSO UPDATE BELOW! + options: '--security-opt label=disable --rm --name rootdoc' # ALSO UPDATE BELOW! + env: + OS_APPLICATION_CREDENTIAL_ID: '7f5b64a265244623a3a933308569bdba' + OS_APPLICATION_CREDENTIAL_SECRET: ${{ secrets.OS_APPLICATION_CREDENTIAL_SECRET }} + OS_AUTH_TYPE: 'v3applicationcredential' + OS_AUTH_URL: 'https://keystone.cern.ch/v3' + OS_IDENTITY_API_VERSION: 3 + OS_INTERFACE: 'public' + OS_REGION_NAME: 'cern' + PYTHONUNBUFFERED: true + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python Virtual Env + # if the `if` expr is false, `if` still has exit code 0. + # if the `if` block is entered, the block's exit code becomes the exit + # code of the `if`. + run: 'if [ -d /py-venv/ROOT-CI/bin/ ]; then . 
/py-venv/ROOT-CI/bin/activate && echo PATH=$PATH >> $GITHUB_ENV; fi' + + - name: Set up directory name and tar filenames + run: | + echo TAR_NAME=html${BASE_REF}.tar >> $GITHUB_ENV + echo DOCDIR_NAME=${BASE_REF} >> $GITHUB_ENV + + # TODO: install latest versions in image on root-ci-images + - name: Install Doxygen 1.10.0 + run : | + mkdir -p ${{ github.workspace }}/doxygen + curl -L https://github.com/doxygen/doxygen/releases/download/Release_1_10_0/doxygen-1.10.0.linux.bin.tar.gz | tar -xz -C ${{ github.workspace }}/doxygen/ --strip-components=1 + echo PATH=$PATH:${{ github.workspace }}/doxygen/bin >> $GITHUB_ENV + + - name: Install qhelpgenerator-qt5 + run: | + dnf update -y + dnf upgrade -y + dnf install -y qt5-doctools + which qhelpgenerator-qt5 + + - name: Apply option overrides + env: + OVERRIDES: "testing=Off roottest=Off minimal=On" + CONFIGFILE: '.github/workflows/root-ci-config/buildconfig/alma9.txt' + shell: bash + run: | + set -x + echo '' >> "$CONFIGFILE" + for ENTRY in $OVERRIDES; do + KEY=$( echo "$ENTRY" | cut -d '=' -f 1 ) + # Add entry to file if not exists, otherwise replace + if grep -q "$KEY=" "$CONFIGFILE"; then + sed -i "s/$KEY=.*\$/$ENTRY/" "$CONFIGFILE" + else + echo "$ENTRY" >> "$CONFIGFILE" + fi + done + cat "$CONFIGFILE" || true + + - name: Build ROOT - Workflow Dispatch + if: github.event_name == 'workflow_dispatch' + run: ".github/workflows/root-ci-config/build_root.py + --buildtype Release + --platform ${{ env.PLATFORM }} + --incremental false + --base_ref ${BASE_REF} + --head_ref ${BASE_REF} + --binaries false + --repository ${{ github.server_url }}/${{ github.repository }}" + + - name: Build ROOT - Schedule + if: github.event_name == 'schedule' + run: ".github/workflows/root-ci-config/build_root.py + --buildtype Release + --platform ${{ env.PLATFORM }} + --incremental false + --base_ref ${BASE_REF} + --head_ref ${BASE_REF} + --binaries false + --repository ${{ github.server_url }}/${{ github.repository }}" + + - name: Run Doxygen + working-directory: ${{ env.DOC_LOCATION }} + shell: bash + run: | + source ROOT-CI/build/bin/thisroot.sh + export DOXYGEN_OUTPUT_DIRECTORY=/github/home/${DOC_DIR} + cd ROOT-CI/src/documentation/doxygen + make -j `nproc --all` + + - name: Create documentation archives + working-directory: ${{ env.DOC_LOCATION }} + shell: bash + run: | + pwd + ls -l + echo ${DOC_DIR} + echo ${TAR_NAME} + ls -l ${DOC_DIR} + tar cf ${TAR_NAME} ${DOC_DIR} + gzip ${TAR_NAME} + ls -l + + #Upload to GitHub as an artifact + - name: Upload tar file for GH + if: ${{ !cancelled() }} + uses: actions/upload-artifact@v4 + with: + name: ${{env.TAR_NAME}}.gz + path: ${{env.DOC_LOCATION}}/${{env.TAR_NAME}}.gz + if-no-files-found: error + + - name: Install AWS CLI + run: | + python -m pip install --upgrade pip + pip install awscli==1.36.40 + aws configure set default.s3.max_concurrent_requests 128 + + - name: Sync documentation to S3 + working-directory: ${{ env.DOC_LOCATION }} + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_ENDPOINT_URL: https://s3.cern.ch/ + + run: | + pwd + ls -l + aws s3 sync ${DOC_DIR}/html/ s3://root/doc/${WEB_DIR_NAME}/ + rm -rf ${DOC_DIR}/html + aws s3 sync ${DOC_DIR}/ s3://root/doc/${WEB_DIR_NAME}/ + aws s3 cp ${TAR_NAME}.gz s3://root/download/ diff --git a/CMakeLists.txt b/CMakeLists.txt index 0b769aa555cb9..be930aea2a798 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -125,7 +125,7 @@ function(relatedrepo_GetClosestMatch) # Otherwise, try to use a branch 
that matches `current_head` in the fork repository
   execute_process(COMMAND ${GIT_EXECUTABLE} ls-remote --heads --tags
-                            ${__ORIGIN_PREFIX}/${__REPO_NAME} ${current_head} OUTPUT_VARIABLE matching_refs)
+                            ${__ORIGIN_PREFIX}/${__REPO_NAME} ${current_head} OUTPUT_VARIABLE matching_refs ERROR_QUIET)
   if(NOT "${matching_refs}" STREQUAL "")
     set(${__FETCHURL_VARIABLE} ${__ORIGIN_PREFIX}/${__REPO_NAME} PARENT_SCOPE)
     return()
@@ -644,8 +644,12 @@ if(testing)
   endif()
   if(DEFINED repo_dir)
     execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${repo_dir}/.git
-                    remote get-url origin OUTPUT_VARIABLE originurl OUTPUT_STRIP_TRAILING_WHITESPACE)
-
+                    remote get-url origin OUTPUT_VARIABLE originurl OUTPUT_STRIP_TRAILING_WHITESPACE
+                    RESULT_VARIABLE query_result
+                    ERROR_VARIABLE query_error)
+    if(NOT query_result EQUAL 0)
+      message(STATUS "Searching for \"origin\" repo of roottest: ${query_error}")
+    endif()
   else()
     # The fetch URL of the 'origin' remote is used to determine the prefix for other repositories by
     # removing the `/root(\.git)?` part. If `GITHUB_PR_ORIGIN` is defined in the environment, its
@@ -657,7 +661,7 @@ if(testing)
                     remote get-url origin OUTPUT_VARIABLE originurl OUTPUT_STRIP_TRAILING_WHITESPACE)
     endif()
   endif()
-  string(REGEX REPLACE "/root(test)?(\.git)?$" "" originprefix ${originurl})
+  string(REGEX REPLACE "/root(test)?(\.git)?$" "" originprefix "${originurl}")
   relatedrepo_GetClosestMatch(REPO_NAME roottest ORIGIN_PREFIX ${originprefix} UPSTREAM_PREFIX ${upstreamprefix}
     FETCHURL_VARIABLE roottest_url FETCHREF_VARIABLE roottest_ref)
@@ -690,7 +694,15 @@ if(testing)
 endif()

 if(LLVM_LINKER_IS_MOLD)
-  message(FATAL_ERROR "The mold linker is not supported by ROOT. Please use a different linker")
+  execute_process(
+    COMMAND mold --version
+    OUTPUT_VARIABLE MOLD_VERSION
+    OUTPUT_STRIP_TRAILING_WHITESPACE
+  )
+  string(REGEX REPLACE "mold ([0-9]+\\.[0-9]+\\.[0-9]+).*" "\\1" MOLD_VERSION "${MOLD_VERSION}")
+  if(MOLD_VERSION VERSION_LESS "2.32.0")
+    message(FATAL_ERROR "The mold linker version ${MOLD_VERSION} is not supported by ROOT. Please use mold >= 2.32.0 or a different linker")
+  endif()
 endif()

 cmake_host_system_information(RESULT PROCESSOR QUERY PROCESSOR_DESCRIPTION)
diff --git a/README/ReleaseNotes/v634/index.md b/README/ReleaseNotes/v634/index.md
index 227f88c56f826..d9ad1613bb5cf 100644
--- a/README/ReleaseNotes/v634/index.md
+++ b/README/ReleaseNotes/v634/index.md
@@ -1,10 +1,15 @@
 % ROOT Version 6.34 Release Notes
-% 2025-05
+% 2024-11
+## Important note about this development release
+
+6.34 is a short-term support cycle not meant to be used for data taking. It will be superseded by the 6.36 cycle, which is foreseen to start with 6.36.00 in the second quarter of 2025. Patch releases of the 6.34 cycle will be provided until June 30th 2025.
+
+
 ## Introduction

-ROOT version 6.34.00 is scheduled for release at the end of May 2025.
+The development ROOT version 6.34.00 is scheduled for release at the end of November 2024.

For more information, see: @@ -12,11 +17,10 @@ For more information, see: The following people have contributed to this new version: - Anton Alkin, Sungkyunkwan University\ Guilherme Amadio, CERN/IT,\ - Abhigyan Acherjee, University of Cincinnati,\ Bertrand Bellenot, CERN/EP-SFT,\ Jakob Blomer, CERN/EP-SFT,\ + Patrick Bos, Netherlands eScience Center,\ Rene Brun,\ Carsten Burgard, DESY\ Will Buttinger, RAL,\ @@ -25,38 +29,40 @@ The following people have contributed to this new version: Olivier Couet, CERN/EP-SFT,\ Marta Czurylo, CERN/EP-SFT,\ Monica Dessole, CERN/EP-SFT,\ + Adrian Duesselberg, TU Munchen,\ Mattias Ellert, Uppsala University,\ Gerri Ganis, CERN/EP-SFT,\ Florine de Geus, CERN/University of Twente,\ Andrei Gheata, CERN/EP-SFT,\ - Bernhard Manfred Gruber,\ Enrico Guiraud,\ + Stephan Hageboeck, CERN/EP-SFT,\ Jonas Hahnfeld, CERN/Goethe University Frankfurt,\ - Fernando Hueso Gonzalez, University of Valencia\ + Fernando Hueso Gonzalez, University of Valencia,\ Attila Krasznahorkay, CERN/EP-ADP-OS,\ Wim Lavrijsen, LBL,\ - Valerii Kholoimov, National University of Kyiv/IRIS-HEP, \ - Dennis Klein, GSI,\ - Christoph Langenbruch, Heidelberg University/LHCb,\ + Aaron Jomy, CERN/EP-SFT,\ + Ida Kaspary, Imperial College,\ + Valerii Kholoimov, National University of Kyiv/IRIS-HEP,\ Sergey Linev, GSI,\ Javier Lopez-Gomez,\ Pere Mato, CERN/EP-SFT,\ + Andrea Maria Ola Mejicanos, Berea College,\ Alaettin Serhan Mete, Argonne,\ Thomas Madlener, DESY,\ + Vedant Mehra, GSOC, \ Lorenzo Moneta, CERN/EP-SFT,\ Alja Mrak Tadel, UCSD/CMS,\ Axel Naumann, CERN/EP-SFT,\ - Dante Niewenhuis, VU Amsterdam\ - Luis Antonio Obis Aparicio, University of Zaragoza,\ Ianna Osborne, Princeton University,\ Vincenzo Eduardo Padulano, CERN/EP-SFT,\ + Giacomo Parolini, CERN/EP-SFT,\ Danilo Piparo, CERN/EP-SFT,\ + Kristupas Pranckietis, Vilnius University,\ Fons Rademakers, CERN/IT,\ Jonas Rembser, CERN/EP-SFT,\ Andrea Rizzi, University of Pisa,\ Andre Sailer, CERN/EP-SFT,\ - Garima Singh, ETH,\ - Juraj Smiesko, CERN/RCS-PRJ-FC,\ + Nopphakorn Subsa-Ard, KMUTT,\ Pavlo Svirin, National Technical University of Ukraine,\ Robin Syring, Leibniz University Hannover, CERN/EP-SFT,\ Maciej Szymanski, Argonne,\ @@ -69,15 +75,26 @@ The following people have contributed to this new version: Wouter Verkerke, NIKHEF/ATLAS,\ Stefan Wunsch\ -## Deprecation and Removal +## Removal and Deprecation + +The following interfaces have been removed: - The `RooAbsReal::plotSliceOn()` function that was deprecated since at least ROOT 6 was removed. Use `plotOn(frame,Slice(...))` instead. +- Multiple overloads of internal Minuit 2 constructors and functions have been removed. If your code fails to compile, you can easily change to another overload that takes a `MnUserParameterState`, which is a change backwards compatible with older ROOT versions. + +The following interfaces are deprecated and will be removed in future releases: + - The `RooTemplateProxy` constructors that take a `proxyOwnsArg` parameter to manually pass ownership are deprecated and replaced by a new constructor that takes ownership via `std::unique_ptr`. They will be removed in ROOT 6.36. - Several RooFit legacy functions are deprecated and will be removed in ROOT 6.36 (see section "RooFit libraries") -- Multiple overloads of internal Minuit 2 constructors and functions have been removed. If your code fails to compile, you can easily change to another overload that takes a `MnUserParameterState`, which is a change backwards compatible with older ROOT versions. 
+- The `int ROOT::CompressionSettings(ROOT::ECompressionAlgorithm algorithm, int compressionLevel)` function is deprecated and will be removed in ROOT 6.36. Please use `int CompressionSettings(RCompressionSetting::EAlgorithm::EValues algorithm, int compressionLevel)` instead.
+- The `void R__zip(int cxlevel, int *srcsize, char *src, int *tgtsize, char *tgt, int *irep)` function is deprecated and will be removed in ROOT 6.36. Please use `void R__zipMultipleAlgorithm(int cxlevel, int *srcsize, char *src, int *tgtsize, char *tgt, int *irep, ROOT::RCompressionSetting::EAlgorithm::EValues algorithm)` instead.
+- The `Bool_t TGeoShape::AreOverlapping(const TGeoBBox *box1, const TGeoMatrix *mat1, const TGeoBBox *box2, const TGeoMatrix *mat2)` function is deprecated and will be removed in ROOT 6.36.
+- The `TPython::Eval()` function is deprecated and scheduled for removal in ROOT 6.36.
+
 ## Core Libraries

+* The Cling C++ interpreter now relies on LLVM version 18.
 * The `rootcling` invocation corresponding to a `genreflex` invocation can be obtained with the new
   `genreflex` command line argument `--print-rootcling-invocation`. This can be useful when migrating from genreflex to rootcling.
@@ -85,8 +102,46 @@
 ## I/O Libraries

+## RNTuple Libraries
+
+* The first version of the `RNTuple` on-disk binary format is finalized. Future versions of ROOT will be able to read back
+  RNTuple data written as of this release. Please note that this version breaks compatibility with experimental RNTuple
+  data written with releases up to v6.34. Please also note that the RNTuple API is not yet moving out of
+  `ROOT::Experimental`.
+* Support for low-precision on-disk floating point representation. This can be enabled through
+  `RField<float>::SetTruncated()` (truncated mantissa) and `RField<float>::SetQuantized()`
+  (scaled integer representation).
+* Link RNTuple self-description to the common ROOT streamer infrastructure. As a result, `TFile::MakeProject()`
+  properly creates header files for classes used in RNTuple data.
+* First version of the new `RNTupleProcessor` class. The `RNTupleProcessor` will support iteration of composed RNTuple data sets (comparable to and improving upon TTree friends and chains). This release supports chained (vertically composed) RNTuples. Other types of concatenations will be added in subsequent releases.
+* Support for cluster staging in the `RNTupleParallelWriter`. Cluster staging enables users to enforce a certain
+  logical cluster ordering in the presence of parallel cluster writing.
+* Support for Direct I/O for writing. This gives access to the peak performance of modern NVMe drives.
+* Support for a "streamer field" that can wrap classic ROOT I/O serialized data for RNTuple in cases where native
+  RNTuple support is not possible (e.g., recursive data structures). Use of the streamer field can be enforced
+  through the LinkDef option `rntupleStreamerMode(true)`. This feature is similar to the unsplit/level-0-split branch in `TTree`.
+* Naming rules have been established for the strings representing the name of an RNTuple and the name of a field. The
+  allowed character set is restricted to Unicode characters encoded as UTF-8, with the following exceptions: control
+  codes, full stop, space, backslash, slash. See a full description in the RNTuple specification. The naming rules are
+  also enforced when creating a new RNTuple or field for writing.
+* Many fixes to RNTuple merging, both through `hadd` and when using the `RNTupleMerger` class directly. Most notable + of these fixes is the proper handling of projected fields. +* Many additional bug fixes and improvements. ## TTree Libraries +* TTreeReader can now detect whether there is a mismatched number of entries between the main trees and the friend tree + and act accordingly in two distinct scenarios. In the first scenario, at least one of the friend trees is shorter than + the main tree, i.e. it has less entries. When the reader is trying to load an entry from the main tree which is beyond + the last entry of the shorter friend, this will result in an error and stop execution. In the second scenario, at + least one friend is longer than the main tree, i.e. it has more entries. Once the reader arrives at the end of the + main tree, it will issue a warning informing the user that there are still entries to be read from the longer friend. +* TTreeReader can now detect whether a branch, which was previously expected to exist in the dataset, has disappeared + due to e.g. a branch missing when switching to the next file in a chain of files. +* TTreeReader can now detect whether an entry being read is incomplete due to one of the following scenarios: + * When switching to a new tree in the chain, a branch that was expected to be found is not available. + * When doing event matching with TTreeIndex, one or more of the friend trees did not match the index value for + the current entry. + ## RDataFrame @@ -96,22 +151,67 @@ The following people have contributed to this new version: code that was not yet available on the user's local application, but that would only become available in the distributed worker. Now a call such as `df.Define("mycol", "return run_my_fun();")` needs to be at least declarable to the interpreter also locally so that the column can be properly tracked. +* The order of execution of operations within the same branch of the computation graph is now guaranteed to be top to + bottom. For example, the following code: + ~~~{.cpp} + ROOT::RDataFrame df{1}; + auto df1 = df.Define("x", []{ return 11; }); + auto df2 = df1.Define("y", []{ return 22; }); + auto graph = df2.Graph("x","y"); + ~~~ + will first execute the operation `Define` of the column `x`, then the one of the column `y`, when filling the graph. +* The `DefinePerSample` operation now works also in the case when a TTree is stored in a subdirectory of a TFile. +* The memory usage of distributed RDataFrame was drastically reduced by better managing caches of the computation graph + artifacts. Large applications which previously had issues with killed executors due to being out of memory now show a + minimal memory footprint. See https://github.com/root-project/root/pull/16094#issuecomment-2252273470 for more details. +* RDataFrame can now read TTree branches of type `std::array` on disk explicitly as `std::array` values in memory. +* New parts of the API were added to allow dealing with missing data in a TTree-based dataset: + * DefaultValueFor(colname, defaultval): lets the user provide one default value for the current entry of the input + column, in case the value is missing. + * FilterAvailable(colname): works in the same way as the traditional Filter operation, where the "expression" is "is + the value available?". If so, the entry is kept, if not, it is discarded. + * FilterMissing(colname): works in the same way as the traditional Filter operation, where the "expression" is "is + the value missing?". 
If so, the entry is kept, if not, it is discarded.
+  The tutorials `df036_missingBranches` and `df037_TTreeEventMatching` show example usage of the new functionalities.
+* The automatic conversion of `std::vector` to `ROOT::RVec` which happens in memory within a JIT-ted RDataFrame
+  computation graph meant that the result of a `Snapshot` operation would implicitly change the type of the input branch.
+  A new option available as the data member `fVector2RVec` of the `RSnapshotOptions` struct can be used to prevent
+  RDataFrame from making this implicit conversion.
+* RDataFrame does not take a lock anymore to check reading of supported types when there is a mismatch, see
+  https://github.com/root-project/root/pull/16528.
+* Complexity of lookups during internal checks for type matching has been made constant on average, see the discussion
+  at https://github.com/root-project/root/pull/16559.
+* Major improvements have been brought to the experimental feature that allows lazily loading ROOT data into batches for
+  machine learning model training pipelines. For a full description, see the presentation at CHEP 2024
+  https://indico.cern.ch/event/1338689/contributions/6015940/.

 ## Histogram Libraries

+* `THStack::GetMinimum()` was not correct in case of negative contents.
+
 ### Upgrade TUnfold to version 17.9

 The [TUnfold package](https://www.desy.de/~sschmitt/tunfold.html) inside ROOT is upgraded from version 17.6 to version 17.9.

 ## Math Libraries

-### Usage of `std::span` in Minuit 2 interfaces
+### Minuit2

-To avoid forcing the user to do manual memory allocations via `std::vector`, the interfaces of Minuit 2 function adapter classes like `ROOT::Minuit2::FCNBase` or `ROOT::Minuit2::FCNGradientBase` were changed to accept `std::span` arguments instead of `std::vector const&`.
+* **Usage of `std::span` in the interface**: To avoid forcing the user to do manual memory allocations via `std::vector`, the interfaces of Minuit 2 function adapter classes like `ROOT::Minuit2::FCNBase` or `ROOT::Minuit2::FCNGradientBase` were changed to accept `std::span` arguments instead of `std::vector const&`.
 This should have minimal impact on users, since one should usually use Minuit 2 via the `ROOT::Math::Minimizer` interface, which is unchanged.

+* **Initial error/covariance matrix values for Hessian matrix**: Initial error/covariance matrix values can be passed for initializing the Hessian matrix to be used in minimization algorithms by attaching the covariance matrix to the `ROOT::Minuit2::MnUserParameterState` instance used for seeding via the method `AddCovariance(const MnUserCovariance &);`.
+
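+As an illustration (not part of the upstream release notes), a user-defined FCN written against the new `std::span`-based interface might look like the following minimal sketch; the quadratic test function and its use are assumptions for the example:
+
+```c++
+#include "Minuit2/FCNBase.h"
+
+#include <span>
+
+// Minimal FCN using the std::span-based call operator: a simple sum of squares.
+class QuadraticFCN : public ROOT::Minuit2::FCNBase {
+public:
+   double operator()(std::span<const double> par) const override
+   {
+      double sum = 0.;
+      for (double p : par)
+         sum += p * p;
+      return sum;
+   }
+   double Up() const override { return 1.; } // error definition, e.g. 1 for a least-squares-like function
+};
+```
+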
 ## RooFit Libraries

+### Error handling in MultiProcess-enabled fits
+
+The `MultiProcess`-based fitting stack now handles errors during fits.
+Error signaling in (legacy) RooFit happens through two mechanisms: `logEvalError` calls and `RooNaNPacker`-enhanced NaN doubles.
+Both are now implemented and working for `MultiProcess`-based fits as well.
+See [this PR](https://github.com/root-project/root/pull/15797) for more details.
+This enables the latest ATLAS Higgs combination fits to complete successfully, and also other fits that encounter NaN values or other expected errors.
+
 ### Miscellaneous

 * Setting `useHashMapForFind(true)` is not supported for RooArgLists anymore, since hash-assisted finding by name hash can be ambiguous: a RooArgList is allowed to have different elements with the same name. If you want to do fast lookups by name, convert your RooArgList to a RooArgSet.
@@ -120,6 +220,10 @@ This should have minimal impact on users, since one should usual use Minuit 2 vi
 * The `ExportOnly()` attribute of the `RooStats::HistFactory::Measurement` object is now switched on by default, and the associated getter and setter functions are deprecated. They will be removed in ROOT 6.36. If you want to fit the model as well instead of just exporting it to a RooWorkspace, please do so with your own code as demonstrated in the `hf001` tutorial.

+* Initial error values can be used for initializing the Hessian matrix to be used in Minuit2 minimization algorithms by setting the `RooMinimizer::Config` option `setInitialCovariance` to `true`. These values correspond to the diagonal entries of the initial covariance matrix.
+
+* `RooFit::MultiProcess`-enabled fitting developer/advanced documentation -- [available through GitHub](https://github.com/root-project/root/blob/master/roofit/doc/developers/test_statistics.md) -- was updated. It now contains the most up-to-date usage instructions for optimizing load balancing (and hence run speed) using this backend.
+
 ### Deprecations

 * The `RooStats::MarkovChain::GetAsDataSet` and `RooStats::MarkovChain::GetAsDataHist` functions are deprecated and will be removed in ROOT 6.36. The same functionality can be implemented by calling `RooAbsData::reduce` on the Markov Chain's `RooDataSet*` (obtained using `MarkovChain::GetAsConstDataSet`) and then obtaining its binned clone (for `RooDataHist`).
@@ -144,33 +248,97 @@ They should be replaced with the suitable alternatives interfaces:
 - `RooAbsArg::checkDependents()`: use `checkObservables()`
 - `RooAbsArg::recursiveCheckDependents()`: use `recursiveCheckObservables()`

+## TMVA SOFIE
+The support for new ONNX operators has been included in the SOFIE ONNX parser and in RModel in order to generate inference code for new types of models.
+The full list of currently supported operators is available [here](https://github.com/root-project/root/blob/master/tmva/sofie/README.md#supported-onnx-operators).
+
+The list of operators added for this release is the following:
+ - Constant and ConstantOfShape
+ - If
+ - Range
+ - ReduceSum
+ - Split
+ - Tile
+ - TopK
+
+In addition, support has been added in RModel to generate the code with dynamic input shape parameters, such as the batch size. These input shape parameters can be specified at run time when evaluating the model.
+Since not all ONNX operators in SOFIE yet support dynamic input parameters, it is possible to initialize a parsed dynamic model with fixed values. For this, a new member function, `RModel::Initialize(const std::map<std::string, size_t> & inputParams, bool verbose = false)`, has been added.
+The RModel class has been extended to support sub-graphs (needed for the operator `If`), dynamic tensors and constant tensors (for example those defined by the operator `Constant`).
+
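+As a minimal illustration (not taken from the upstream notes), parsing an ONNX model with SOFIE and fixing a dynamic shape parameter before generating the inference code might look as follows; the file name and the `"batch_size"` parameter name are placeholders that depend on the model:
+
+```c++
+#include "TMVA/RModelParser_ONNX.hxx"
+
+void sofie_fixed_batch()
+{
+   using namespace TMVA::Experimental::SOFIE;
+   RModelParser_ONNX parser;
+   RModel model = parser.Parse("model.onnx"); // placeholder file name
+   model.Initialize({{"batch_size", 1}});     // give the dynamic shape parameter a concrete value
+   model.Generate();                          // generate the inference code
+   model.OutputGenerated("model.hxx");        // write it to a header file
+}
+```
+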
 ## Graphics Backends

+### Web-based TWebCanvas
+
+Support "haxis" draw option for histograms, which allows superposition of several histograms drawn on the same pad with a horizontal y axis. Add `tutorials/webcanv/haxis.cxx` macro demonstrating the new feature.
+
+Support "frame" draw option for several primitives like `TBox`, `TLine`, `TLatex`. This enforces clipping of such objects by
+the frame border. Provide a demo in the `tutorials/webcanv/inframe.cxx` macro.
+
+Provide batch mode for image production with a headless browser. In this mode the data for several canvases are collected together (in a batch) and the N images are then produced with a single invocation of the web browser (Chrome or Firefox). For instance, after `TWebCanvas::BatchImageMode(100)` the next 99 calls to the `TCanvas::SaveAs(filename)` method will not create image files, but with the following call all 100 images will be produced together. Alternatively, one can use the static `TCanvas::SaveAll()` method, which creates images for several canvases at once.
+
+Support multi-page PDF file creation with web-based canvas using the `svg2pdf.js` library. Both with native and web-based graphics one can now do:
+```c++
+c1->SaveAs("file.pdf[")
+c2->SaveAs("file.pdf+")
+c3->SaveAs("file.pdf+")
+c4->SaveAs("file.pdf]")
+```
+Or the same can be achieved with:
+```c++
+TCanvas::SaveAll({c1, c2, c3, c4}, "file.pdf");
+```
+
 ## 2D Graphics Libraries

+* In `TGraphErrors`, `TGraphAsymmErrors` and `TGraphBentErrors`, the error bars were drawn inside the marker when the marker was bigger than the error bars. This produced a weird plot. This is now fixed.
+
+* When error bars exceeded the y-range limits, the ends of the error bars were nevertheless displayed on the bottom and top x-axes, which was not correct: it looked like the total error bar while it was indeed not.
+* Choosing an appropriate color scheme is essential for making results easy to understand and interpret. Factors like colorblindness and converting colors to grayscale for publications can impact accessibility. Furthermore, results should be aesthetically pleasing. The following three color schemes, recommended by M. Petroff in [arXiv:2107.02270v2](https://arxiv.org/pdf/2107.02270) and available on [GitHub](https://github.com/mpetroff/accessible-color-cycles) under the MIT License, meet these criteria.
+
+* Properly implement the TScatter palette attributes, as requested [here](https://github.com/root-project/root/issues/15922).
+
+* Add `TStyle::SetLegendFillStyle`.

 ## 3D Graphics Libraries

+### REve
+* Update RenderCore rendering engine to version 1.6 with improved
+implementation of Signed Distance Field (SDF) fonts.

-## Geometry Libraries

+* Implement REveText element to draw text with SDF fonts in screen or
+world coordinates. See the new example in tutorials/eve7/texts.C
+Add initial version of REve overlays: a 2D area in screen coordinates
+that can draw text and frames in relative proportions; support position
+and scale editing on the client side.

-## Database Libraries

+Draw axis labels with SDF fonts in the mixed space-screen coordinate
+system.
+Introduce REveGeoTopNode: a wrapper over a TGeoNode, possibly
+displaced with a global transformation stored in REveElement. It holds a
+pointer to TGeoManager and controls for steering of TGeoPainter
+(fVisOption, fVisLevel and fMaxVisNodes).

-## Networking Libraries

+Integrate JSRoot hierarchical node browser in REve as REveGeoTable
+element. The demonstration of this feature is included in the example
+tutorials/eve7/eveGeoBrowser.C

-## GUI Libraries

 ## Geometry Libraries
+The geometry package is now optional; it is enabled by default in the CMake configuration. To disable it, use the `-Dgeom=OFF` CMake option.

-## Montecarlo Libraries

 ## Web-based GUIs
+Adjust `rootssh` script to be usable on macOS. Fix a problem with starting more than one web widget on a remote node.

-## PROOF Libraries

+Fix `rootbrowse` script so it can properly be used with all kinds of web widgets. Provide the `--web=` argument as for the
+regular root executable.

+Update openui5 library to version 1.128.0.
+Requires use of modern web browsers, skipping IE support.

-## PyROOT
+## Python Interface

 ### Typesafe `TTree::SetBranchAddress()` for array inputs

@@ -207,19 +375,317 @@ std::any result;
 TPython::Exec("_anyresult = ROOT.std.make_any['std::string']('done')", &result);
 std::cout << std::any_cast<std::string>(result) << std::endl;
 ```
-
-## Language Bindings
-
-
 ## JavaScript ROOT

+Upgrade to JSROOT 7.8.0 with the following new features and fixes:
+
+1. Let use custom time zone for time display, support '&utc' and '&cet' in URL parameters
+2. Support gStyle.fLegendFillStyle
+3. Let change histogram min/max values via context menu
+4. Support Z-scale zooming with `TScatter`
+5. Implement "haxis" draw option for histogram to draw only axes for hbar
+6. Implement "axisg" and "haxisg" to draw axes with grids
+7. Support `TH1` marker, text and line drawing superimposed with "haxis"
+8. Support `TBox`, `TLatex`, `TLine`, `TMarker` drawing on "frame", support drawing on swapped axes
+9. Implement `TProfile` and `TProfile2D` projections https://github.com/root-project/root/issues/15851
+10. Draw total histogram from `TEfficiency` when draw option starts with 'b'
+11. Let redraw `TEfficiency`, `THStack` and `TMultiGraph` with different draw options via hist context menu
+12. Support 'pads' draw options for `TMultiGraph`, support context menu for it
+13. Let drop objects on sub-pads
+14. Properly loads ES6 modules for web canvas
+15. Improve performance of `TH3`/`RH3` drawing by using `THREE.InstancedMesh`
+16. Implement batch mode with '&batch' URL parameter to create SVG/PNG images with default GUI
+17. Adjust node.js implementation to produce identical output with normal browser
+18. Create necessary infrastructure for testing with 'puppeteer'
+19. Support injection of ES6 modules via '&inject=path.mjs'
+20. Using importmap for 'jsroot' in all major HTML files and in demos
+21. Implement `settings.CutAxisLabels` flag to remove labels which may exceed graphical range
+22. Let disable usage of `TAxis` custom labels via context menu
+23. Let configure default draw options via context menu, preserved in the local storage
+24. Let save canvas as JSON file from context menu, object as JSON from inspector
+25. Upgrade three.js r162 -> r168, use r162 only in node.js because of "gl" module
+26. Create unified svg2pdf/jspdf ES6 modules, integrate in jsroot builds
+27. Let create multi-page PDF document - in `TWebCanvas` batch mode
+28. Let add in latex external links via `#url[link]{label}` syntax - including jsPDF support
+29. Support `TAttMarker` style with line width bigger than 1
+30. Provide link to ROOT class documentation from context menus
+31. Implement axis labels and title rotations on lego plots
+32. Internals - upgrade to eslint 9
+33. Internals - do not select pad (aka gPad) for objects drawing, always use assigned pad painter
+34. Fix - properly save zoomed ranges in drawingJSON()
+35. Fix - properly redraw `TMultiGraph`
+36. Fix - show empty bin in `TProfile2D` if it has entries #316
+37. Fix - unzooming on log scale was extending range forever
+38. Fix - display empty hist bin if fSumw2 not zero
+39. Fix - geometry display on android devices
+
+JSROOT is now used as the default display in `jupyter`.
+
+
+## Tools
+
+### hadd
+
+* Fixed a bug where in some circumstances `hadd` would not correctly merge objects in nested folders of a ROOT file.
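+As a minimal illustration (not part of the upstream notes), the same kind of merge can also be performed programmatically with `TFileMerger`, which `hadd` uses under the hood; the file names below are placeholders:
+
+```c++
+#include "TFileMerger.h"
+
+void merge_runs()
+{
+   TFileMerger merger;
+   merger.OutputFile("merged.root", /*force=*/true); // overwrite an existing output file
+   merger.AddFile("run1.root");                      // placeholder input files
+   merger.AddFile("run2.root");
+   merger.Merge();                                   // nested directories in the inputs are merged recursively
+}
+```
+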
+ ## Tutorials +* New tutorials [accessiblecolorschemes.C](https://root.cern/doc/master/accessiblecolorschemes_8C.html) and [hstackcolorscheme.C](https://root.cern/doc/master/thstackcolorscheme_8C.html). ## Class Reference Guide ## Build, Configuration and Testing Infrastructure - +- Coverage of the CI was greatly improved, with Clang builds, Alma9 ARM64 and Alma9 x86 NVidia GPU builds were added to the CI + +The following builtins have been updated: + +- daviX 0.8.7 +- XRootD 5.7.1 + +## Bugs and Issues fixed in this release + +More than 200 items were addressed for this release. The full list is: + +* [[#17040](https://github.com/root-project/root/issues/17040)] - Small difference between kp6Violet implementation and official value from Petroff paper +* [[#16976](https://github.com/root-project/root/issues/16976)] - Strange overflow bin bar when plotting TH1D with X1 option +* [[#16946](https://github.com/root-project/root/issues/16946)] - Crash in RDF constructor with empty file list +* [[#16942](https://github.com/root-project/root/issues/16942)] - another crash in finalization +* [[#16834](https://github.com/root-project/root/issues/16834)] - `RFieldBase::Create` does not enforce valid field names +* [[#16826](https://github.com/root-project/root/issues/16826)] - RNTuple unexpected "field iteration over empty fields is unsupported" +* [[#16796](https://github.com/root-project/root/issues/16796)] - RooBinSamplingPdf does not forward expectedEventsFunc creation calls +* [[#16784](https://github.com/root-project/root/issues/16784)] - Remove default value of p from TH1::GetQuantiles() as is the case with TF1::GetQuantiles +* [[#16771](https://github.com/root-project/root/issues/16771)] - copying a default constructed `TH2Poly` fails. +* [[#16753](https://github.com/root-project/root/issues/16753)] - [ntuple] Free uncompressed page buffers in RPageSinkBuf with IMT +* [[#16752](https://github.com/root-project/root/issues/16752)] - [ntuple] Copy sealed page in RPageSinkBuf after compression +* [[#16736](https://github.com/root-project/root/issues/16736)] - Please improve documentation and/or argument names for TH1::GetQuantiles() +* [[#16715](https://github.com/root-project/root/issues/16715)] - TMVA fails to link to cudnn +* [[#16687](https://github.com/root-project/root/issues/16687)] - Loss of floating point precision when saving TCanvas as ROOT macro +* [[#16680](https://github.com/root-project/root/issues/16680)] - TMVA/Sofie tutorials used same name for generated files bur are run in parallel. +* [[#16647](https://github.com/root-project/root/issues/16647)] - ROOT_ADD_PYUNITTEST and ROOT_ADD_GTEST are naming test inconsitently. 
+* [[#16600](https://github.com/root-project/root/issues/16600)] - TMVA RReader not multithread safe +* [[#16588](https://github.com/root-project/root/issues/16588)] - Fix RFieldBase::GetNElements() for record/class fields +* [[#16562](https://github.com/root-project/root/issues/16562)] - TTreeViewer save session absolute path +* [[#16523](https://github.com/root-project/root/issues/16523)] - OpenGL doesn't work on macosx +* [[#16513](https://github.com/root-project/root/issues/16513)] - [ntuple] Clarifications about late schema extension +* [[#16479](https://github.com/root-project/root/issues/16479)] - Add THStack/TH1 constructor for TRatioPlot +* [[#16475](https://github.com/root-project/root/issues/16475)] - Unable to use EOS tokens with RDataFrame since 6.32 +* [[#16474](https://github.com/root-project/root/issues/16474)] - Hadd does not add correctly histograms in nested folders +* [[#16469](https://github.com/root-project/root/issues/16469)] - cppyy no aggregate initialization constructor +* [[#16419](https://github.com/root-project/root/issues/16419)] - RooUnblindOffset crashes for root version 6.32 +* [[#16402](https://github.com/root-project/root/issues/16402)] - Importing ROOT prevents Python garbage collection +* [[#16374](https://github.com/root-project/root/issues/16374)] - Configuring with builtin xrootd can fail because of seemingly not found OpenSSL library +* [[#16366](https://github.com/root-project/root/issues/16366)] - Compiler warning in Bytes.h: casts away qualifiers +* [[#16360](https://github.com/root-project/root/issues/16360)] - [rdf] gcc14 issue warning in `RDF/InterfaceUtils.hxx` +* [[#16326](https://github.com/root-project/root/issues/16326)] - [ntuple] Better control of cluster ordering for parallel writes +* [[#16324](https://github.com/root-project/root/issues/16324)] - [ntuple] Allow for creating bare model from on-disk info +* [[#16321](https://github.com/root-project/root/issues/16321)] - [ntuple] Split RNTupleView in two classes +* [[#16298](https://github.com/root-project/root/issues/16298)] - [PyROOT] Conversion from `std::string` to `std::string_view` broken in 6.32 +* [[#16290](https://github.com/root-project/root/issues/16290)] - [ntuple] Provide tutorial for (envisioned) framework usage +* [[#16252](https://github.com/root-project/root/issues/16252)] - tutorial-rcanvas-df104-py +* [[#16249](https://github.com/root-project/root/issues/16249)] - Iterating with a range for does one extra iteration +* [[#16244](https://github.com/root-project/root/issues/16244)] - JSROOT not drawing bins with content=0 but entries > 0 in TProfile2D +* [[#16241](https://github.com/root-project/root/issues/16241)] - [ntuple] Method to prepare cluster commit / flush column write buffers +* [[#16236](https://github.com/root-project/root/issues/16236)] - [ntuple] Improve field token usage for parallel writing +* [[#16219](https://github.com/root-project/root/issues/16219)] - Module map on the new XCode version for macos15-beta +* [[#16190](https://github.com/root-project/root/issues/16190)] - TFileMerger behaviour when the directory structure contains repeated names +* [[#16184](https://github.com/root-project/root/issues/16184)] - Serialisation (and therefore I/O) issues with TF1 and TFitResultPtr +* [[#16167](https://github.com/root-project/root/issues/16167)] - TGeomPainter Web not behaving the same way as TGeomPainter ROOT +* [[#16149](https://github.com/root-project/root/issues/16149)] - CMake and xrootd builtin +* 
[[#16135](https://github.com/root-project/root/issues/16135)] - [ntuple] Cannot create RFieldBase for signed char +* [[#16124](https://github.com/root-project/root/issues/16124)] - RNTupleInspector returns wrong compressed size for large N-tuples +* [[#16121](https://github.com/root-project/root/issues/16121)] - Potential memory leak in clang triggered by `findScope` +* [[#16051](https://github.com/root-project/root/issues/16051)] - TColor::GetFreeColorIndex() returns index that is already used +* [[#16047](https://github.com/root-project/root/issues/16047)] - TMVA SOFIE shadow declaration +* [[#16031](https://github.com/root-project/root/issues/16031)] - VecOps binary functions not using the right types +* [[#16024](https://github.com/root-project/root/issues/16024)] - `thisroot.sh` tries to drop the wrong lib paths from the existing environment +* [[#15977](https://github.com/root-project/root/issues/15977)] - [gui] Event StatusBar does not work well when TMarker outside of zoom region +* [[#15962](https://github.com/root-project/root/issues/15962)] - outdated help links +* [[#15959](https://github.com/root-project/root/issues/15959)] - [RF] Make Offset(“bin”) usable for CLs method +* [[#15948](https://github.com/root-project/root/issues/15948)] - Tex Gyre fonts has a bad side effect ... +* [[#15924](https://github.com/root-project/root/issues/15924)] - python -c 'import ROOT' fails on macOS if ROOT is built with gnuinstall=ON +* [[#15919](https://github.com/root-project/root/issues/15919)] - Problem with TClass::GetListOfAllPublicMethods() in python +* [[#15912](https://github.com/root-project/root/issues/15912)] - Clad issues with `MacOSX15.0.sdk` +* [[#15887](https://github.com/root-project/root/issues/15887)] - Broken plot .C macros for default Name() argument in plotOn() +* [[#15883](https://github.com/root-project/root/issues/15883)] - Initialize TRatioPlot margins from Pad margins set in the current style +* [[#15851](https://github.com/root-project/root/issues/15851)] - Support for TProfile and TProfile2D projectionX and projectionXY options in JSROOT +* [[#15774](https://github.com/root-project/root/issues/15774)] - [ci] Add Python version to Windows precomplied release title or filename +* [[#15756](https://github.com/root-project/root/issues/15756)] - [RF][HS3] ATLAS ttbar workspaces roundtrip +* [[#15740](https://github.com/root-project/root/issues/15740)] - `THStack` does not automatically shows negative bins +* [[#15738](https://github.com/root-project/root/issues/15738)] - Segmentation violation during build on ix86 (32 bit intel) +* [[#15736](https://github.com/root-project/root/issues/15736)] - [df] ProgressBar reporting on number of files is now broken +* [[#15727](https://github.com/root-project/root/issues/15727)] - Windows CMake project cannot find_library() after integrating with ROOT. 
+* [[#15703](https://github.com/root-project/root/issues/15703)] - Leaking memory though strings in PyROOT +* [[#15686](https://github.com/root-project/root/issues/15686)] - JITted code changes the execution order of computation graph nodes +* [[#15666](https://github.com/root-project/root/issues/15666)] - [ntuple][doc] document RNTuple Anchor format +* [[#15661](https://github.com/root-project/root/issues/15661)] - [ntuple] Cannot properly read late model extension (meta)data +* [[#15643](https://github.com/root-project/root/issues/15643)] - TGFileContainer crashes in pyroot +* [[#15617](https://github.com/root-project/root/issues/15617)] - `RDF::Describe` returns an incorrect file count +* [[#15590](https://github.com/root-project/root/issues/15590)] - Infinite recursion in TFile::Open +* [[#15537](https://github.com/root-project/root/issues/15537)] - [cling] Crash when non-void function does not return a value +* [[#15534](https://github.com/root-project/root/issues/15534)] - RNTuple: fields with mixed STL types sometimes fail to be filled +* [[#15511](https://github.com/root-project/root/issues/15511)] - Possible memory corruption in cling +* [[#15503](https://github.com/root-project/root/issues/15503)] - Allow users to change default Snapshot behaviour of collections +* [[#15460](https://github.com/root-project/root/issues/15460)] - TEnum::GetEnum("B")->GetUnderlyingType() does not following typedefs +* [[#15447](https://github.com/root-project/root/issues/15447)] - `-Dminimal=ON` disables `runtime_cxxmodules` +* [[#15442](https://github.com/root-project/root/issues/15442)] - Distributed RDataFrame does not see all defined column names +* [[#15425](https://github.com/root-project/root/issues/15425)] - TTreeProcessorMP processes events multiple times when there are more threads than entries +* [[#15419](https://github.com/root-project/root/issues/15419)] - RNTuple: add max key length field to RNTuple anchor +* [[#15407](https://github.com/root-project/root/issues/15407)] - `cling::utils::Lookup::Named` does not look into using directive +* [[#15406](https://github.com/root-project/root/issues/15406)] - `TEnum::GetEnum` does not seem to see 'through' using statements. +* [[#15405](https://github.com/root-project/root/issues/15405)] - [RF] ExternalConstraints documentation incorrect for RooMCStudy +* [[#15384](https://github.com/root-project/root/issues/15384)] - GetCppName: Mangled version of the C++ symbol +* [[#15336](https://github.com/root-project/root/issues/15336)] - [MSVC] ROOT_x86 failed due to libCling.exp : error LNK2001: unresolved external symbol "char const * __cdecl __std_find_trivial(char const *,char const *,char) +* [[#15321](https://github.com/root-project/root/issues/15321)] - [MSVC] Root is failed with error G694476FC: static_assert failed "Unexpected size" +* [[#15285](https://github.com/root-project/root/issues/15285)] - Fast element setter/getter for TMatrixT/TVectorT classes +* [[#15270](https://github.com/root-project/root/issues/15270)] - MakeClass and MakeSelector fails with special character in branchname. 
+* [[#15269](https://github.com/root-project/root/issues/15269)] - Iterators in pyROOT working differently in ROOT master compared to 6.30/02 +* [[#15213](https://github.com/root-project/root/issues/15213)] - cmake warning while configuring +* [[#15178](https://github.com/root-project/root/issues/15178)] - ROOT generates CMake warnings when building from the tarball +* [[#15118](https://github.com/root-project/root/issues/15118)] - jsoninterface does not build if provided with RapidYAML +* [[#15107](https://github.com/root-project/root/issues/15107)] - [ci] clang-format fails when adding commits +* [[#15090](https://github.com/root-project/root/issues/15090)] - TClass::GetClassInfo() is not thread safe +* [[#15039](https://github.com/root-project/root/issues/15039)] - [RDataFrame] Expose more local df operations for distributed RDF +* [[#14966](https://github.com/root-project/root/issues/14966)] - Fix print check for object that return different types for begin() and end() +* [[#14871](https://github.com/root-project/root/issues/14871)] - [ntuple] add streamer info records to TFile +* [[#14809](https://github.com/root-project/root/issues/14809)] - [ntuple] Incorrect treatment of unsplittable classes +* [[#14808](https://github.com/root-project/root/issues/14808)] - [ntuple] TObject serialization faulty +* [[#14789](https://github.com/root-project/root/issues/14789)] - interpreter fails with assertion in debug builds on ARM when upgrading gcc +* [[#14767](https://github.com/root-project/root/issues/14767)] - rootn.exe instant crash on startup +* [[#14710](https://github.com/root-project/root/issues/14710)] - `std::set` not working in Windows PyROOT +* [[#14697](https://github.com/root-project/root/issues/14697)] - [FreeBSD] davix build failure +* [[#14592](https://github.com/root-project/root/issues/14592)] - Error value and context of call to FT_Set_Char_Size in TTF::SetTextSize should be in error message +* [[#14561](https://github.com/root-project/root/issues/14561)] - [ROOT-4936] TMatrixTSym is not actually symmetric +* [[#14544](https://github.com/root-project/root/issues/14544)] - [ROOT-8515] Make TEntryList class reference relevant +* [[#14541](https://github.com/root-project/root/issues/14541)] - [ROOT-6193] Editor for palette axis cannot set title properties +* [[#14487](https://github.com/root-project/root/issues/14487)] - Assert when trying to write RNTuple to full disk +* [[#14217](https://github.com/root-project/root/issues/14217)] - Module merge problems with GCC 13, C++20, Pythia8 +* [[#14173](https://github.com/root-project/root/issues/14173)] - Adding a couple of useful methods in THnD +* [[#14132](https://github.com/root-project/root/issues/14132)] - Lazy multithread RDataFrame::Snapshot cause unnessary warning and break gDirectory +* [[#14055](https://github.com/root-project/root/issues/14055)] - Failing build with `-Dasan=ON` and memory leak in minimal build +* [[#13729](https://github.com/root-project/root/issues/13729)] - [math] Contour method has some problems with Minuit2 +* [[#13677](https://github.com/root-project/root/issues/13677)] - [Cling] Potential unloading issue which breaks distributed execution +* [[#13511](https://github.com/root-project/root/issues/13511)] - TMapFile can't work +* [[#13498](https://github.com/root-project/root/issues/13498)] - Assertion failure in TMVA `can't dereference value-initialized vector iterator` +* [[#13481](https://github.com/root-project/root/issues/13481)] - Update doc to express deprecation of genreflex and usage of 
rootcling as a replacement +* [[#13432](https://github.com/root-project/root/issues/13432)] - TCling::AutoLoad may not work if a pcm linked to the library is not preloaded +* [[#13055](https://github.com/root-project/root/issues/13055)] - -Dtmva-sofie=OFF does not switch off sofie. +* [[#13016](https://github.com/root-project/root/issues/13016)] - Extra vertical space on a canvas when CanvasPreferGL is set to true, reproducible via SSH +* [[#12935](https://github.com/root-project/root/issues/12935)] - [RF] Global correlation coefficients after SumW2Error +* [[#12842](https://github.com/root-project/root/issues/12842)] - [ntuple] Review the column representation of nullable fields +* [[#12509](https://github.com/root-project/root/issues/12509)] - TClass prefers ` over `` specialization +* [[#12460](https://github.com/root-project/root/issues/12460)] - [ntuple] Set non-negative column flag for unsigned integer fields +* [[#12428](https://github.com/root-project/root/issues/12428)] - Test failure in RNTuple: RNTuple.TClassEBO fails +* [[#12426](https://github.com/root-project/root/issues/12426)] - RNTuple endian issues +* [[#12334](https://github.com/root-project/root/issues/12334)] - TTreeReader fails to read `T` as `T` +* [[#12272](https://github.com/root-project/root/issues/12272)] - CI: releases +* [[#12251](https://github.com/root-project/root/issues/12251)] - Problems with `TH1::GetQuantiles` +* [[#12182](https://github.com/root-project/root/issues/12182)] - TPython::Eval does not work with string with python3.8+ for ROOT 6.24-6.26.8 +* [[#12136](https://github.com/root-project/root/issues/12136)] - [ntuple] `RNTupleView`'s move ctor causes double delete +* [[#12108](https://github.com/root-project/root/issues/12108)] - `constexpr` function return incorrect value in Windows +* [[#11749](https://github.com/root-project/root/issues/11749)] - Remove empty files from the source distribution tarball +* [[#11707](https://github.com/root-project/root/issues/11707)] - Crash when macro is named main.cpp +* [[#11603](https://github.com/root-project/root/issues/11603)] - Disable automatic 'call home' in cmake when not needed +* [[#11353](https://github.com/root-project/root/issues/11353)] - Compiled program with libNew.so crash +* [[#10317](https://github.com/root-project/root/issues/10317)] - [Doxygen] tutorials appear as namespaces +* [[#10239](https://github.com/root-project/root/issues/10239)] - ? wildcard broken in TChain::Add() +* [[#10010](https://github.com/root-project/root/issues/10010)] - TLeaf::ReadBasket invalid write in TMVA test +* [[#9792](https://github.com/root-project/root/issues/9792)] - should fLogger be persistant ? +* [[#9646](https://github.com/root-project/root/issues/9646)] - Numerically stable computation of invariant mass +* [[#9637](https://github.com/root-project/root/issues/9637)] - `TGraph::Add(TF1 *f)` method like for `TH1`'s +* [[#9445](https://github.com/root-project/root/issues/9445)] - Hit errors when build ROOT with msvc on AddressSanitizer mode +* [[#9425](https://github.com/root-project/root/issues/9425)] - [RF] Figure out how to handle RooArgList with duplicates and hash-assisted find +* [[#9188](https://github.com/root-project/root/issues/9188)] - Unnecessary (?) 
warnings reading `unique_ptr` +* [[#9137](https://github.com/root-project/root/issues/9137)] - [tree] TTree/TChain silently return bogus data if friend is shorter than main tree +* [[#8833](https://github.com/root-project/root/issues/8833)] - Crash reading >= 3D array in TTree via MakeClass in Windows ROOT6 compilation +* [[#8828](https://github.com/root-project/root/issues/8828)] - Crash when defining something in the Detail namespace after a lookup of that namespace +* [[#8815](https://github.com/root-project/root/issues/8815)] - TBB not inheriting CXXFLAGS +* [[#8716](https://github.com/root-project/root/issues/8716)] - Minuit2: FCNGradientBase::CheckGradient() is ignored +* [[#8704](https://github.com/root-project/root/issues/8704)] - [DF] Add support for 'missing' columns +* [[#8367](https://github.com/root-project/root/issues/8367)] - *** Break *** segmentation violation in case of compilation errors in unnamed macros +* [[#8194](https://github.com/root-project/root/issues/8194)] - TClass::GetStreamerInfo crashes for several classes +* [[#8031](https://github.com/root-project/root/issues/8031)] - Reserve "build" directory name in ROOT sources for build files +* [[#7875](https://github.com/root-project/root/issues/7875)] - [ntuple] Improve normalization of platform-specific primitives and typedefs +* [[#7823](https://github.com/root-project/root/issues/7823)] - [RF] RooStatsUtils::MakeCleanWorkspace +* [[#7713](https://github.com/root-project/root/issues/7713)] - [Tree] Bogus data silently read when trying to access an indexed friend TTree with an invalid index +* [[#7160](https://github.com/root-project/root/issues/7160)] - MacOS: -Dcocoa=ON -Dopengl=OFF pass cmake but fail compilation +* [[#7103](https://github.com/root-project/root/issues/7103)] - [RF] HistFactory::FlexibleInterpVar Interpolation code2 and code3 are the same +* [[ROOT-10975](https://its.cern.ch/jira/browse/ROOT-10975)] - ACLiC should make rootcling warnings visible +* [[ROOT-10908](https://its.cern.ch/jira/browse/ROOT-10908)] - SMatrix is written as a Double32_t +* [[ROOT-10902](https://its.cern.ch/jira/browse/ROOT-10902)] - SMatrix read from TTree contains all zeroes +* [[ROOT-10883](https://its.cern.ch/jira/browse/ROOT-10883)] - Warning in TBrowser when selecting "Add" method of a histogram +* [[ROOT-10865](https://its.cern.ch/jira/browse/ROOT-10865)] - [RVec] No Doxygen documentation about arithmetic operators +* [[ROOT-10698](https://its.cern.ch/jira/browse/ROOT-10698)] - Valgrind dies at assertion ‘!overlap’ failed +* [[ROOT-10539](https://its.cern.ch/jira/browse/ROOT-10539)] - Slow tutorials/dataframe/df027_SQliteDependencyOverVersion.C +* [[ROOT-10414](https://its.cern.ch/jira/browse/ROOT-10414)] - rootcling doesn't parse -isystem correctly +* [[ROOT-10342](https://its.cern.ch/jira/browse/ROOT-10342)] - valuePrint 'forgets' template argument in type when printing about an assignment statement. +* [[ROOT-10200](https://its.cern.ch/jira/browse/ROOT-10200)] - Automatic reloading doesn't work for std::cout on macOS +* [[ROOT-9961](https://its.cern.ch/jira/browse/ROOT-9961)] - TTree::Print("toponly") inserts extra newline between listed items +* [[ROOT-9953](https://its.cern.ch/jira/browse/ROOT-9953)] - TRint should not terminate on assert +* [[ROOT-9919](https://its.cern.ch/jira/browse/ROOT-9919)] - TFile construction silently drops XRootD protocol +* [[ROOT-9918](https://its.cern.ch/jira/browse/ROOT-9918)] - Crash TMVA by running (unused?) 
public function from TMVA::Factory +* [[ROOT-9705](https://its.cern.ch/jira/browse/ROOT-9705)] - flag to disable (root)test(s) that uses remote files +* [[ROOT-9673](https://its.cern.ch/jira/browse/ROOT-9673)] - Printout from TMinuit::mnrazz() can not be suppressed +* [[ROOT-9448](https://its.cern.ch/jira/browse/ROOT-9448)] - libNew returns nullptr instead of implementing operator new, has many warnings +* [[ROOT-9420](https://its.cern.ch/jira/browse/ROOT-9420)] - CTest: Fail on warnings in tutorials +* [[ROOT-9395](https://its.cern.ch/jira/browse/ROOT-9395)] - ROOTTEST_ADD_TEST does not complain if source file does not exist +* [[ROOT-9354](https://its.cern.ch/jira/browse/ROOT-9354)] - [TTreeReader] Crash when reading array from in-memory tree +* [[ROOT-9266](https://its.cern.ch/jira/browse/ROOT-9266)] - Cannot unload python code / shared library +* [[ROOT-8991](https://its.cern.ch/jira/browse/ROOT-8991)] - Cling exports buggy include paths to AcLIC +* [[ROOT-8775](https://its.cern.ch/jira/browse/ROOT-8775)] - TTree::MakeSelector can produce invalid C++ code +* [[ROOT-8745](https://its.cern.ch/jira/browse/ROOT-8745)] - Reloading of code that uses R__LOAD_LIBRARY fails +* [[ROOT-8519](https://its.cern.ch/jira/browse/ROOT-8519)] - Bug when use simple math functions in TTree::SetAlias() +* [[ROOT-8271](https://its.cern.ch/jira/browse/ROOT-8271)] - roofit asymmetry plots create corrupted pdf when not providing a custom binning +* [[ROOT-8256](https://its.cern.ch/jira/browse/ROOT-8256)] - Limit to complexity of TTreeFormula? - "Bad Numerical Expression" +* [[ROOT-8240](https://its.cern.ch/jira/browse/ROOT-8240)] - Must not unload or reload cling runtime universe +* [[ROOT-8078](https://its.cern.ch/jira/browse/ROOT-8078)] - Tab completion fails for lambda functions +* [[ROOT-7137](https://its.cern.ch/jira/browse/ROOT-7137)] - Unsafe object ownership issue with TClonesArray/TObjArray +* [[ROOT-6968](https://its.cern.ch/jira/browse/ROOT-6968)] - Interpretation of nparam argument to TMethodCall::SetParamPtrs changed in root 6 +* [[ROOT-6931](https://its.cern.ch/jira/browse/ROOT-6931)] - Tab completion of file names in directories with '+' +* [[ROOT-6822](https://its.cern.ch/jira/browse/ROOT-6822)] - Dangerous behavior of TTreeFormula::EvalInstance64 +* [[ROOT-6313](https://its.cern.ch/jira/browse/ROOT-6313)] - TClingClassInfo::ClassProperty() might give wrong results +* [[ROOT-5983](https://its.cern.ch/jira/browse/ROOT-5983)] - Add test for wrong data member in TBranchElement +* [[ROOT-5963](https://its.cern.ch/jira/browse/ROOT-5963)] - Re-implement tab completion for ROOT +* [[ROOT-5843](https://its.cern.ch/jira/browse/ROOT-5843)] - List of loaded libraries +* [[ROOT-5439](https://its.cern.ch/jira/browse/ROOT-5439)] - Dump-output of TH1 not showing pointerness of fArray +* [[ROOT-2345](https://its.cern.ch/jira/browse/ROOT-2345)] - Optimize TMatrixDSparse operation kAtA + +## Release 6.34.02 + +Published on December 16, 2024 + +### Items addressed in this release + +This release includes a few minor fixes in RDataFrame and RooFit, besides the items below. Moreover, built-in Davix was patched to build with GCC14 while waiting for the new Davix release. 
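+
+For users building this release from source with GCC 14, the patch applies to the bundled Davix, which is selected through ROOT's standard CMake switches. The configure lines below are only an illustrative sketch (the `davix` and `builtin_davix` option names are ROOT's existing flags; the source and build paths are placeholders):
+
+```
+# Sketch: configure a ROOT 6.34 build that uses the bundled (patched) Davix
+cmake -Ddavix=ON -Dbuiltin_davix=ON -DCMAKE_BUILD_TYPE=RelWithDebInfo ../root-src
+cmake --build . -- -j8
+```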
+ +* [[#17145](https://github.com/root-project/root/issues/17145)] - Distributed RDataFrame cannot deal with same column name in different branches +* [[#17190](https://github.com/root-project/root/issues/17190)] - Compiler error with GCC 14.2.0 related to Davix +* [[#17222](https://github.com/root-project/root/issues/17222)] - Regression in Python ownership for histograms within subdirectories with ROOT 6.34.00 +* [[#17223](https://github.com/root-project/root/issues/17223)] - TFileMerger leaves files open resulting in corrupt metadata + +## Release 6.34.04 + +Published on February 10, 2025 + +### Items addressed in this release + +This branch includes a few minor fixes in RDataFrame, as well as a new mechanism to specify binding options when opening sockets with TSocketServer. +Moreover, the items listed below were addressed: + +* [[ROOT-7372](https://its.cern.ch/jira/browse/ROOT-7372)] - Accessing complex map branches crashes in PyROOT +* [[ROOT-10482](https://its.cern.ch/jira/browse/ROOT-10482)] - pullHist and residHist biased +* [[#12841](https://github.com/root-project/root/issues/12841)] - [ntuple] prefer IO constructor in RField::GenerateValue() +* [[#14007](https://github.com/root-project/root/issues/14007)] - Cannot create a RNtuple into a TDirectory +* [[#15473](https://github.com/root-project/root/issues/15473)] - Segmentation fault when building with the mold linker +* [[#16189](https://github.com/root-project/root/issues/16189)] - TFile::k630forwardCompatibility does not apply to new files correctly +* [[#16560](https://github.com/root-project/root/issues/16560)] - Issue using TColor and saving canvas to ROOT format +* [[#17291](https://github.com/root-project/root/issues/17291)] - [RF] Parameter ordering bug in RooFormulaArgStreamer +* [[#17305](https://github.com/root-project/root/issues/17305)] - The ONNX.Tile5D test in tmva/sofie/test/TestCustomModelsFromONNX.cxx writes array elements beyond the last element in the array +* [[#17321](https://github.com/root-project/root/issues/17321)] - [RF] Unused Class rule +* [[#17442](https://github.com/root-project/root/issues/17442)] - [Python] Regression in `std::bytes` support with cppyy inside ROOT +* [[#17444](https://github.com/root-project/root/issues/17444)] - ROOT doesn't compile with gcc-15 +* [[#17472](https://github.com/root-project/root/issues/17472)] - RooEllipse not drawn in notebooks with `%jsroot on` + +## HEAD of the v6-34-00-patches branch \ No newline at end of file diff --git a/bindings/experimental/distrdf/python/DistRDF/HeadNode.py b/bindings/experimental/distrdf/python/DistRDF/HeadNode.py index ab7cc9cdde7fb..69ec035624210 100644 --- a/bindings/experimental/distrdf/python/DistRDF/HeadNode.py +++ b/bindings/experimental/distrdf/python/DistRDF/HeadNode.py @@ -105,7 +105,7 @@ def __init__(self, backend: BaseBackend, npartitions: Optional[int], localdf: RO # Internal RDataFrame object, useful to expose information such as # column names. - self._localdf = localdf + self.rdf_node = localdf # A dictionary where the keys are the IDs of the objects to live visualize # and the values are the corresponding callback functions @@ -119,8 +119,8 @@ def __del__(self): the garbage collector, the cppyy memory regulator and the C++ object destructor.
""" - if hasattr(self, "_localdf"): - del self._localdf + if hasattr(self, "rdf_node"): + del self.rdf_node @property def npartitions(self) -> Optional[int]: diff --git a/bindings/experimental/distrdf/python/DistRDF/Node.py b/bindings/experimental/distrdf/python/DistRDF/Node.py index 7caf8c3df9bb7..fd85564dd2fa0 100644 --- a/bindings/experimental/distrdf/python/DistRDF/Node.py +++ b/bindings/experimental/distrdf/python/DistRDF/Node.py @@ -59,9 +59,12 @@ class Node(object): rdf_node: A reference to the result of calling a function of the RDataFrame API with the current operation. This is practically a node of the true computation graph, which is being executed in some - distributed task. It is a transient attribute. On the client, it - is always None. The value is computed and stored only during a task - on a worker. + distributed task. It is a transient attribute. On the client, this + is filled when the operation being called is a transformation. It is + done to ensure information regarding e.g. column names and types is + populated and available locally with the right dependencies. On a + worker, this attribute can represent any node of the C++ computation + graph and its created and processed within the worker. """ def __init__(self, get_head: Callable[[], HeadNode], node_id: int = 0, diff --git a/bindings/experimental/distrdf/python/DistRDF/Proxy.py b/bindings/experimental/distrdf/python/DistRDF/Proxy.py index c8a8ccfc9a92d..21bf9555302b8 100644 --- a/bindings/experimental/distrdf/python/DistRDF/Proxy.py +++ b/bindings/experimental/distrdf/python/DistRDF/Proxy.py @@ -59,8 +59,12 @@ def execute_graph(node: Node) -> None: def _update_internal_df_with_transformation(node:Node, operation: Operation) -> None: """Propagate transform operations to the headnode internal RDataFrame""" - rdf_operation = getattr(node.get_head()._localdf, operation.name) - node.get_head()._localdf = rdf_operation(*operation.args, **operation.kwargs) + # The parent node is None only if the node is the head node + parent_node = node.parent if node.parent is not None else node + # Retrieve correct C++ transformation to call + rdf_operation = getattr(parent_node.rdf_node, operation.name) + # Call and inject the result in the Python node + node.rdf_node = rdf_operation(*operation.args, **operation.kwargs) def _create_new_node(parent: Node, operation: Operation.Operation) -> Node: """Creates a new node and inserts it in the computation graph""" @@ -253,11 +257,11 @@ def __getattr__(self, attr): def GetColumnNames(self): """Forward call to the internal RDataFrame object""" - return self.proxied_node.get_head()._localdf.GetColumnNames() + return self.proxied_node.rdf_node.GetColumnNames() def GetColumnType(self, column): """Forward call to the internal RDataFrame object""" - return self.proxied_node.get_head()._localdf.GetColumnType(column) + return self.proxied_node.rdf_node.GetColumnType(column) def _create_new_op(self, *args, **kwargs): """ diff --git a/bindings/experimental/distrdf/test/test_callable_generator.py b/bindings/experimental/distrdf/test/test_callable_generator.py index 5fe9d53fa8893..a4911aaa28642 100644 --- a/bindings/experimental/distrdf/test/test_callable_generator.py +++ b/bindings/experimental/distrdf/test/test_callable_generator.py @@ -73,10 +73,10 @@ def test_mapper_from_graph(self): node = Proxy.NodeProxy(hn) # Set of operations to build the graph n1 = node.Define("mock_col", "1") - n2 = node.Filter("mock_col>0").Filter("mock_col>0") + n2 = n1.Filter("mock_col>0").Filter("mock_col>0") n4 = 
n2.Count() n5 = n1.Count() - n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841 + n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841 # Generate and execute the mapper graph_dict = hn._generate_graph_dict() @@ -107,10 +107,10 @@ def test_mapper_with_pruning(self): # Set of operations to build the graph n1 = node.Define("mock_col", "1") - n2 = node.Filter("mock_col>0").Filter("mock_col>0") + n2 = n1.Filter("mock_col>0").Filter("mock_col>0") n4 = n2.Count() n5 = n1.Count() - n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841 + n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841 # Until here the graph would be: # [1, 2, 2, 3, 3, 2] @@ -152,11 +152,11 @@ def test_dfs_graph_with_pruning_transformations(self): # Graph nodes n1 = node.Define("mock_col", "1") - n2 = node.Filter("mock_col>0") + n2 = n1.Filter("mock_col>0") n3 = n2.Filter("mock_col>0") n4 = n3.Count() # noqa: avoid PEP8 F841 n5 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841 - n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841 + n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841 # Transformation pruning, n5 was earlier a transformation node n5 = n1.Count() # noqa: avoid PEP8 F841 @@ -189,11 +189,11 @@ def test_dfs_graph_with_recursive_pruning(self): # Graph nodes n1 = node.Define("mock_col", "1") - n2 = node.Filter("mock_col>0") + n2 = n1.Filter("mock_col>0") n3 = n2.Filter("mock_col>0") n4 = n3.Count() # noqa: avoid PEP8 F841 n5 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841 - n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841 + n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841 # Remove user references from n4, n3, n2 n4 = n3 = n2 = None # noqa: avoid PEP8 F841 @@ -226,11 +226,11 @@ def test_dfs_graph_with_parent_pruning(self): # Graph nodes n1 = node.Define("mock_col", "1") - n2 = node.Filter("mock_col>0") + n2 = n1.Filter("mock_col>0") n3 = n2.Filter("mock_col>0") n4 = n3.Count() # noqa: avoid PEP8 F841 n5 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841 - n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841 + n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841 # Remove references from n2 (which shouldn't affect the graph) n2 = None @@ -265,12 +265,12 @@ def test_dfs_graph_with_computed_values_pruning(self): # Graph nodes n1 = node.Define("mock_col", "1") - n2 = node.Filter("mock_col>0") + n2 = n1.Filter("mock_col>0") n3 = n2.Filter("mock_col>0") n4 = n3.Count() # noqa: avoid PEP8 F841 n5 = n1.Filter("mock_col>0") n6 = n5.Count() - n7 = node.Filter("mock_col>0") + n7 = n1.Filter("mock_col>0") # This is to make sure action nodes with # already computed values are pruned. 
@@ -307,11 +307,11 @@ def test_dfs_graph_without_pruning(self): # Graph nodes n1 = node.Define("mock_col", "1") - n2 = node.Filter("mock_col>0") + n2 = n1.Filter("mock_col>0") n3 = n2.Filter("mock_col>0") n4 = n3.Count() # noqa: avoid PEP8 F841 n5 = n1.Count() # noqa: avoid PEP8 F841 - n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841 + n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841 # Generate and execute the mapper graph_dict = hn._generate_graph_dict() @@ -340,7 +340,7 @@ def test_nodes_gt_python_recursion_limit(self): node = Proxy.NodeProxy(hn) # Create three branches n1 = node.Define("mock_col", "1") - n2 = node.Filter("mock_col>0") + n2 = n1.Filter("mock_col>0") # Append 1000 nodes per branch for i in range(1000): n1 = n1.Define(f"mock_col_{i}", "1") diff --git a/bindings/experimental/distrdf/test/test_proxy.py b/bindings/experimental/distrdf/test/test_proxy.py index 2a780abdf56d3..45bab8ac86d86 100644 --- a/bindings/experimental/distrdf/test/test_proxy.py +++ b/bindings/experimental/distrdf/test/test_proxy.py @@ -109,12 +109,14 @@ def test_supported_transformation(self): } for transformation, args in transformations.items(): - newProxy = getattr(proxy, transformation)(*args) - self.assertEqual(proxy.proxied_node._new_op_name, transformation) - self.assertIsInstance(newProxy, Proxy.NodeProxy) - self.assertEqual(newProxy.proxied_node.operation.name, + parent_node = proxy.proxied_node + proxy = getattr(proxy, transformation)(*args) + # Calling the operation on the parent node modifies an attribute + self.assertEqual(parent_node._new_op_name, transformation) + self.assertIsInstance(proxy, Proxy.NodeProxy) + self.assertEqual(proxy.proxied_node.operation.name, transformation) - self.assertEqual(newProxy.proxied_node.operation.args, args) + self.assertEqual(proxy.proxied_node.operation.args, args) def test_node_attr_transformation(self): """ @@ -304,4 +306,31 @@ def test_get_column_type_after_define(self): column_types.append(column_type) self.assertSequenceEqual(column_types, ["double", "int"]) - \ No newline at end of file + + def test_columninfo_defines_twobranches(self): + """ + Check new column names and types are available locally even if the same + column name is used in different branches of the computation graph. 
+ """ + + node = create_dummy_headnode(1) + proxy = Proxy.NodeProxy(node) + + cols_before = proxy.GetColumnNames() + self.assertSequenceEqual(cols_before, []) + + expected_coltype_1 = "Long64_t" + branch_1 = proxy.Define("mycol", f"static_cast<{expected_coltype_1}>(42)") + + expected_coltype_2 = "float" + branch_2 = proxy.Define("mycol", f"static_cast<{expected_coltype_2}>(33)") + + cols_1 = branch_1.GetColumnNames() + self.assertSequenceEqual(cols_1, ["mycol"]) + coltype_1 = branch_1.GetColumnType(cols_1[0]) + self.assertEqual(coltype_1, expected_coltype_1) + + cols_2 = branch_2.GetColumnNames() + self.assertSequenceEqual(cols_2, ["mycol"]) + coltype_2 = branch_2.GetColumnType(cols_2[0]) + self.assertEqual(coltype_2, expected_coltype_2) diff --git a/bindings/jupyroot/python/JupyROOT/helpers/utils.py b/bindings/jupyroot/python/JupyROOT/helpers/utils.py index 82836a07951b3..3552974f58d69 100644 --- a/bindings/jupyroot/python/JupyROOT/helpers/utils.py +++ b/bindings/jupyroot/python/JupyROOT/helpers/utils.py @@ -74,7 +74,7 @@ // We are in jupyter notebooks, use require.js which should be configured already requirejs.config({{ - paths: {{ 'JSRootCore' : [ 'build/jsroot', 'https://root.cern/js/7.7.4/build/jsroot', 'https://jsroot.gsi.de/7.7.4/build/jsroot' ] }} + paths: {{ 'JSRootCore' : [ 'build/jsroot', 'https://root.cern/js/7.8.1/build/jsroot', 'https://jsroot.gsi.de/7.8.1/build/jsroot' ] }} }})(['JSRootCore'], function(Core) {{ display_{jsDivId}(Core); }}); @@ -97,7 +97,7 @@ // Try loading a local version of requirejs and fallback to cdn if not possible. script_load_{jsDivId}(base_url + 'static/build/jsroot.js', function(){{ console.error('Fail to load JSROOT locally, please check your jupyter_notebook_config.py file'); - script_load_{jsDivId}('https://root.cern/js/7.7.4/build/jsroot.js', function(){{ + script_load_{jsDivId}('https://root.cern/js/7.8.1/build/jsroot.js', function(){{ document.getElementById("{jsDivId}").innerHTML = "Failed to load JSROOT"; }}); }}); diff --git a/bindings/pyroot/cppyy/CPyCppyy/include/CPyCppyy/DispatchPtr.h b/bindings/pyroot/cppyy/CPyCppyy/include/CPyCppyy/DispatchPtr.h index 760443c17380b..bd098f6917fa0 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/include/CPyCppyy/DispatchPtr.h +++ b/bindings/pyroot/cppyy/CPyCppyy/include/CPyCppyy/DispatchPtr.h @@ -64,7 +64,7 @@ class CPYCPPYY_CLASS_EXTERN DispatchPtr { } private: - PyObject* Get() const; + PyObject* Get(bool borrowed=true) const; private: PyObject* fPyHardRef; diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/API.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/API.cxx index f1e4a336bd825..4c28b7a91ff2c 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/API.cxx +++ b/bindings/pyroot/cppyy/CPyCppyy/src/API.cxx @@ -413,7 +413,16 @@ void CPyCppyy::ExecScript(const std::string& name, const std::vectortp_new((PyTypeObject*)pytype, args, nullptr); @@ -133,7 +140,7 @@ static PyObject* enum_ctype(PyObject* cls, PyObject* args, PyObject* kwds) CPyCppyy::CPPEnum* CPyCppyy::CPPEnum_New(const std::string& name, Cppyy::TCppScope_t scope) { // Create a new enum type based on the actual C++ type. Enum values are added to -// the type by may also live in the enclosing scope. +// the type but may also live in the enclosing scope. 
CPPEnum* pyenum = nullptr; @@ -190,8 +197,13 @@ CPyCppyy::CPPEnum* CPyCppyy::CPPEnum_New(const std::string& name, Cppyy::TCppSco // collect the enum values Cppyy::TCppIndex_t ndata = Cppyy::GetNumEnumData(etype); + bool values_ok = true; for (Cppyy::TCppIndex_t idata = 0; idata < ndata; ++idata) { PyObject* val = pyval_from_enum(resolved, pyenum, pyside_type, etype, idata); + if (!val) { + values_ok = false; + break; + } PyObject* pydname = CPyCppyy_PyText_FromString(Cppyy::GetEnumDataName(etype, idata).c_str()); PyObject_SetAttr(pyenum, pydname, val); PyObject_SetAttr(val, PyStrings::gCppName, pydname); @@ -206,6 +218,13 @@ CPyCppyy::CPPEnum* CPyCppyy::CPPEnum_New(const std::string& name, Cppyy::TCppSco Py_DECREF(args); Py_DECREF(pymeta); + if (!values_ok) { + if (!PyErr_Occurred()) + PyErr_SetString(PyExc_ValueError, "could not set some of the enum values"); + Py_DECREF(pyenum); + return nullptr; + } + } else { // presumably not a class enum; simply pretend int Py_INCREF(&PyInt_Type); diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/CPPScope.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/CPPScope.cxx index 40f4e0648af9a..144f0605d0c84 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/CPPScope.cxx +++ b/bindings/pyroot/cppyy/CPyCppyy/src/CPPScope.cxx @@ -479,12 +479,14 @@ static PyObject* meta_getattro(PyObject* pyclass, PyObject* pyname) // try all outstanding using namespaces in turn to find the attribute (will cache // locally later; TODO: doing so may cause pathological cases) for (auto pyref : *klass->fImp.fUsing) { - PyObject* pyuscope = PyWeakref_GetObject(pyref); + PyObject* pyuscope = CPyCppyy_GetWeakRef(pyref); if (pyuscope) { attr = PyObject_GetAttr(pyuscope, pyname); - if (attr) break; - PyErr_Clear(); + if (!attr) PyErr_Clear(); + Py_DECREF(pyuscope); } + if (attr) + break; } } diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/CPyCppyy.h b/bindings/pyroot/cppyy/CPyCppyy/src/CPyCppyy.h index d0f3b0fc5621a..e3244bdcfe74b 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/CPyCppyy.h +++ b/bindings/pyroot/cppyy/CPyCppyy/src/CPyCppyy.h @@ -351,8 +351,26 @@ inline PyObject* CPyCppyy_tp_call(PyObject* cb, PyObject* args, size_t, PyObject } #endif +// weakref forced strong reference +#if PY_VERSION_HEX < 0x30d0000 +static inline PyObject* CPyCppyy_GetWeakRef(PyObject* ref) { + PyObject* pyobject = PyWeakref_GetObject(ref); + if (!pyobject || pyobject == Py_None) + return nullptr; + Py_INCREF(pyobject); + return pyobject; +} +#else +static inline PyObject* CPyCppyy_GetWeakRef(PyObject* ref) { + PyObject* pyobject = nullptr; + if (PyWeakref_GetRef(ref, &pyobject) != -1) + return pyobject; + return nullptr; +} +#endif + // Py_TYPE as inline function -#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE) +#if PY_VERSION_HEX < 0x03090000 && !defined(Py_SET_TYPE) static inline void _Py_SET_TYPE(PyObject *ob, PyTypeObject *type) { ob->ob_type = type; } #define Py_SET_TYPE(ob, type) _Py_SET_TYPE((PyObject*)(ob), type) diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx index 0a8bce369233f..fafd1cc7a70de 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx +++ b/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx @@ -203,6 +203,7 @@ static bool IsPyCArgObject(PyObject* pyobject) return Py_TYPE(pyobject) == pycarg_type; } +#if PY_VERSION_HEX < 0x30d0000 static bool IsCTypesArrayOrPointer(PyObject* pyobject) { static PyTypeObject* cstgdict_type = nullptr; @@ -219,6 +220,43 @@ static bool IsCTypesArrayOrPointer(PyObject* pyobject) 
return true; return false; } +#else +// the internals of ctypes have been redone, requiring a more complex checking +namespace { + +typedef struct { + PyTypeObject *DictRemover_Type; + PyTypeObject *PyCArg_Type; + PyTypeObject *PyCField_Type; + PyTypeObject *PyCThunk_Type; + PyTypeObject *StructParam_Type; + PyTypeObject *PyCType_Type; + PyTypeObject *PyCStructType_Type; + PyTypeObject *UnionType_Type; + PyTypeObject *PyCPointerType_Type; +// ... unused fields omitted ... +} _cppyy_ctypes_state; + +} // unnamed namespace + +static bool IsCTypesArrayOrPointer(PyObject* pyobject) +{ + static _cppyy_ctypes_state* state = nullptr; + if (!state) { + PyObject* ctmod = PyImport_AddModule("_ctypes"); // the extension module, not the Python one + if (ctmod) + state = (_cppyy_ctypes_state*)PyModule_GetState(ctmod); + } + + // verify for object types that have a C payload + if (state && (PyObject_IsInstance((PyObject*)Py_TYPE(pyobject), (PyObject*)state->PyCType_Type) || + PyObject_IsInstance((PyObject*)Py_TYPE(pyobject), (PyObject*)state->PyCPointerType_Type))) { + return true; + } + + return false; +} +#endif //- helper to establish life lines ------------------------------------------- @@ -3475,8 +3513,11 @@ static struct InitConvFactories_t { gf["const signed char&"] = gf["const char&"]; #if __cplusplus > 201402L gf["std::byte"] = gf["uint8_t"]; + gf["byte"] = gf["uint8_t"]; gf["const std::byte&"] = gf["const uint8_t&"]; + gf["const byte&"] = gf["const uint8_t&"]; gf["std::byte&"] = gf["uint8_t&"]; + gf["byte&"] = gf["uint8_t&"]; #endif gf["std::int8_t"] = gf["int8_t"]; gf["const std::int8_t&"] = gf["const int8_t&"]; diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/DispatchPtr.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/DispatchPtr.cxx index 43b73fb8f107c..5affdd2120317 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/DispatchPtr.cxx +++ b/bindings/pyroot/cppyy/CPyCppyy/src/DispatchPtr.cxx @@ -8,13 +8,18 @@ //----------------------------------------------------------------------------- -PyObject* CPyCppyy::DispatchPtr::Get() const +PyObject* CPyCppyy::DispatchPtr::Get(bool borrowed) const { - if (fPyHardRef) return fPyHardRef; + if (fPyHardRef) { + if (!borrowed) Py_INCREF(fPyHardRef); + return fPyHardRef; + } if (fPyWeakRef) { - PyObject* disp = PyWeakref_GetObject(fPyWeakRef); - if (disp != Py_None) // dispatcher object disappeared? + PyObject* disp = CPyCppyy_GetWeakRef(fPyWeakRef); + if (disp) { // dispatcher object disappeared? + if (borrowed) Py_DECREF(disp); return disp; + } } return nullptr; } @@ -36,9 +41,10 @@ CPyCppyy::DispatchPtr::DispatchPtr(PyObject* pyobj, bool strong) : fPyHardRef(nu //----------------------------------------------------------------------------- CPyCppyy::DispatchPtr::DispatchPtr(const DispatchPtr& other, void* cppinst) : fPyWeakRef(nullptr) { - PyObject* pyobj = other.Get(); + PyObject* pyobj = other.Get(false /* not borrowed */); fPyHardRef = pyobj ? 
(PyObject*)((CPPInstance*)pyobj)->Copy(cppinst) : nullptr; if (fPyHardRef) ((CPPInstance*)fPyHardRef)->SetDispatchPtr(this); + Py_XDECREF(pyobj); } //----------------------------------------------------------------------------- @@ -48,9 +54,10 @@ CPyCppyy::DispatchPtr::~DispatchPtr() { // is "notified" by nulling out the reference and an exception will be raised on // continued access if (fPyWeakRef) { - PyObject* pyobj = PyWeakref_GetObject(fPyWeakRef); - if (pyobj && pyobj != Py_None && ((CPPScope*)Py_TYPE(pyobj))->fFlags & CPPScope::kIsPython) + PyObject* pyobj = CPyCppyy_GetWeakRef(fPyWeakRef); + if (pyobj && ((CPPScope*)Py_TYPE(pyobj))->fFlags & CPPScope::kIsPython) ((CPPInstance*)pyobj)->GetObjectRaw() = nullptr; + Py_XDECREF(pyobj); Py_DECREF(fPyWeakRef); } else if (fPyHardRef) { ((CPPInstance*)fPyHardRef)->GetObjectRaw() = nullptr; @@ -64,9 +71,10 @@ CPyCppyy::DispatchPtr& CPyCppyy::DispatchPtr::assign(const DispatchPtr& other, v if (this != &other) { Py_XDECREF(fPyWeakRef); fPyWeakRef = nullptr; Py_XDECREF(fPyHardRef); - PyObject* pyobj = other.Get(); + PyObject* pyobj = other.Get(false /* not borrowed */); fPyHardRef = pyobj ? (PyObject*)((CPPInstance*)pyobj)->Copy(cppinst) : nullptr; if (fPyHardRef) ((CPPInstance*)fPyHardRef)->SetDispatchPtr(this); + Py_XDECREF(pyobj); } return *this; } @@ -86,9 +94,7 @@ void CPyCppyy::DispatchPtr::CppOwns() { // C++ maintains the hardref, keeping the PyObject alive w/o outstanding ref if (fPyWeakRef) { - fPyHardRef = PyWeakref_GetObject(fPyWeakRef); - if (fPyHardRef == Py_None) fPyHardRef = nullptr; - Py_XINCREF(fPyHardRef); + fPyHardRef = CPyCppyy_GetWeakRef(fPyWeakRef); Py_DECREF(fPyWeakRef); fPyWeakRef = nullptr; } } diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx index cdef2b8c7b0de..06731d6d85d78 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx +++ b/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx @@ -484,7 +484,12 @@ bool CPyCppyy::InsertDispatcher(CPPScope* klass, PyObject* bases, PyObject* dct, // Python class to keep the inheritance tree intact) for (const auto& name : protected_names) { PyObject* disp_dct = PyObject_GetAttr(disp_proxy, PyStrings::gDict); +#if PY_VERSION_HEX < 0x30d00f0 PyObject* pyf = PyMapping_GetItemString(disp_dct, (char*)name.c_str()); +#else + PyObject* pyf = nullptr; + PyMapping_GetOptionalItemString(disp_dct, (char*)name.c_str(), &pyf); +#endif if (pyf) { PyObject_SetAttrString((PyObject*)klass, (char*)name.c_str(), pyf); Py_DECREF(pyf); diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx index dd6b71d1d504e..16a1b31de7594 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx +++ b/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx @@ -1036,6 +1036,8 @@ struct InitExecFactories_t { #if __cplusplus > 201402L gf["std::byte ptr"] = (ef_t)+[](cdims_t d) { return new ByteArrayExecutor{d}; }; gf["const std::byte ptr"] = gf["std::byte ptr"]; + gf["byte ptr"] = gf["std::byte ptr"]; + gf["const byte ptr"] = gf["std::byte ptr"]; #endif gf["int8_t ptr"] = (ef_t)+[](cdims_t d) { return new Int8ArrayExecutor{d}; }; gf["uint8_t ptr"] = (ef_t)+[](cdims_t d) { return new UInt8ArrayExecutor{d}; }; @@ -1060,8 +1062,11 @@ struct InitExecFactories_t { gf["internal_enum_type_t ptr"] = gf["int ptr"]; #if __cplusplus > 201402L gf["std::byte"] = gf["uint8_t"]; + gf["byte"] = gf["uint8_t"]; gf["std::byte&"] = gf["uint8_t&"]; + gf["byte&"] = gf["uint8_t&"]; gf["const std::byte&"] = 
gf["const uint8_t&"]; + gf["const byte&"] = gf["const uint8_t&"]; #endif gf["std::int8_t"] = gf["int8_t"]; gf["std::int8_t&"] = gf["int8_t&"]; diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/LowLevelViews.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/LowLevelViews.cxx index a58e7bed5c47b..0f31cb8bfa58a 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/LowLevelViews.cxx +++ b/bindings/pyroot/cppyy/CPyCppyy/src/LowLevelViews.cxx @@ -817,38 +817,62 @@ static PyObject* ll_reshape(CPyCppyy::LowLevelView* self, PyObject* shape) //--------------------------------------------------------------------------- -static PyObject* ll_array(CPyCppyy::LowLevelView* self, PyObject* args, PyObject* /* kwds */) +static PyObject* ll_array(CPyCppyy::LowLevelView* self, PyObject* args, PyObject* kwds) { // Construct a numpy array from the lowlevelview (w/o copy if possible); this // uses the Python methods to avoid depending on numpy directly // Expect as most a dtype from the arguments; - static PyObject* ctmod = PyImport_ImportModule("numpy"); // ref-count kept - if (!ctmod) + static PyObject* npmod = PyImport_ImportModule("numpy"); // ref-count kept + if (!npmod) return nullptr; -// expect possible dtype from the arguments, otherwie take it from the type code - PyObject* dtype; - if (!args || PyTuple_GET_SIZE(args) != 1) { - PyObject* npdtype = PyObject_GetAttr(ctmod, CPyCppyy::PyStrings::gDType); - PyObject* typecode = ll_typecode(self, nullptr); - dtype = PyObject_CallFunctionObjArgs(npdtype, typecode, nullptr); - Py_DECREF(typecode); - Py_DECREF(npdtype); - } else { - dtype = PyTuple_GET_ITEM(args, 0); - Py_INCREF(dtype); + bool docopy = false; + if (kwds) { + PyObject* pycp = PyObject_GetItem(kwds, CPyCppyy::PyStrings::gCopy); + if (!pycp) { + PyErr_SetString(PyExc_TypeError, "__array__ only supports the \"copy\" keyword"); + return nullptr; + } + + docopy = PyObject_IsTrue(pycp); + Py_DECREF(pycp); } - if (!dtype) - return nullptr; + if (!docopy) { // view requested + // expect possible dtype from the arguments, otherwise take it from the type code + PyObject* dtype; + if (!args || PyTuple_GET_SIZE(args) != 1) { + PyObject* npdtype = PyObject_GetAttr(npmod, CPyCppyy::PyStrings::gDType); + PyObject* typecode = ll_typecode(self, nullptr); + dtype = PyObject_CallFunctionObjArgs(npdtype, typecode, nullptr); + Py_DECREF(typecode); + Py_DECREF(npdtype); + } else { + dtype = PyTuple_GET_ITEM(args, 0); + Py_INCREF(dtype); + } - PyObject* npfrombuf = PyObject_GetAttr(ctmod, CPyCppyy::PyStrings::gFromBuffer); - PyObject* view = PyObject_CallFunctionObjArgs(npfrombuf, (PyObject*)self, dtype, nullptr); - Py_DECREF(dtype); - Py_DECREF(npfrombuf); + if (!dtype) + return nullptr; + + PyObject* npfrombuf = PyObject_GetAttr(npmod, CPyCppyy::PyStrings::gFromBuffer); + PyObject* view = PyObject_CallFunctionObjArgs(npfrombuf, (PyObject*)self, dtype, nullptr); + Py_DECREF(dtype); + Py_DECREF(npfrombuf); + + return view; + + } else { // copy requested + PyObject* npcopy = PyObject_GetAttr(npmod, CPyCppyy::PyStrings::gCopy); + PyObject* newarr = PyObject_CallFunctionObjArgs(npcopy, (PyObject*)self, nullptr); + Py_DECREF(npcopy); - return view; + return newarr; + } + +// never get here + return nullptr; } diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/ProxyWrappers.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/ProxyWrappers.cxx index 801337717e081..0c1c0e002ecfd 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/ProxyWrappers.cxx +++ b/bindings/pyroot/cppyy/CPyCppyy/src/ProxyWrappers.cxx @@ -499,11 +499,9 @@ PyObject* 
CPyCppyy::GetScopeProxy(Cppyy::TCppScope_t scope) // Retrieve scope proxy from the known ones. PyClassMap_t::iterator pci = gPyClasses.find(scope); if (pci != gPyClasses.end()) { - PyObject* pyclass = PyWeakref_GetObject(pci->second); - if (pyclass != Py_None) { - Py_INCREF(pyclass); + PyObject* pyclass = CPyCppyy_GetWeakRef(pci->second); + if (pyclass) return pyclass; - } } return nullptr; diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.cxx index e918d44fc0d54..abbf16ece0049 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.cxx +++ b/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.cxx @@ -7,6 +7,7 @@ PyObject* CPyCppyy::PyStrings::gAssign = nullptr; PyObject* CPyCppyy::PyStrings::gBases = nullptr; PyObject* CPyCppyy::PyStrings::gBase = nullptr; +PyObject* CPyCppyy::PyStrings::gCopy = nullptr; PyObject* CPyCppyy::PyStrings::gCppBool = nullptr; PyObject* CPyCppyy::PyStrings::gCppName = nullptr; PyObject* CPyCppyy::PyStrings::gAnnotations = nullptr; @@ -87,6 +88,7 @@ bool CPyCppyy::CreatePyStrings() { CPPYY_INITIALIZE_STRING(gAssign, __assign__); CPPYY_INITIALIZE_STRING(gBases, __bases__); CPPYY_INITIALIZE_STRING(gBase, __base__); + CPPYY_INITIALIZE_STRING(gCopy, copy); #if PY_VERSION_HEX < 0x03000000 CPPYY_INITIALIZE_STRING(gCppBool, __cpp_nonzero__); #else @@ -169,6 +171,7 @@ PyObject* CPyCppyy::DestroyPyStrings() { // Remove all cached python strings. Py_DECREF(PyStrings::gBases); PyStrings::gBases = nullptr; Py_DECREF(PyStrings::gBase); PyStrings::gBase = nullptr; + Py_DECREF(PyStrings::gCopy); PyStrings::gCopy = nullptr; Py_DECREF(PyStrings::gCppBool); PyStrings::gCppBool = nullptr; Py_DECREF(PyStrings::gCppName); PyStrings::gCppName = nullptr; Py_DECREF(PyStrings::gAnnotations); PyStrings::gAnnotations = nullptr; diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.h b/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.h index 7012b89ce1620..55eaef58273a3 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.h +++ b/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.h @@ -10,6 +10,7 @@ namespace PyStrings { extern PyObject* gAssign; extern PyObject* gBases; extern PyObject* gBase; + extern PyObject* gCopy; extern PyObject* gCppBool; extern PyObject* gCppName; extern PyObject* gAnnotations; diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx index 2196b94ff33f4..8559b2ebfe7ff 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx +++ b/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx @@ -528,12 +528,14 @@ PyObject* VectorData(PyObject* self, PyObject*) //--------------------------------------------------------------------------- -PyObject* VectorArray(PyObject* self, PyObject* /* args */) +PyObject* VectorArray(PyObject* self, PyObject* args, PyObject* kwargs) { PyObject* pydata = VectorData(self, nullptr); - PyObject* view = PyObject_CallMethodNoArgs(pydata, PyStrings::gArray); + PyObject* arrcall = PyObject_GetAttr(pydata, PyStrings::gArray); + PyObject* newarr = PyObject_Call(arrcall, args, kwargs); + Py_DECREF(arrcall); Py_DECREF(pydata); - return view; + return newarr; } @@ -1809,7 +1811,7 @@ bool CPyCppyy::Pythonize(PyObject* pyclass, const std::string& name) Utility::AddToClass(pyclass, "data", (PyCFunction)VectorData); // numpy array conversion - Utility::AddToClass(pyclass, "__array__", (PyCFunction)VectorArray); + Utility::AddToClass(pyclass, "__array__", (PyCFunction)VectorArray, METH_VARARGS | METH_KEYWORDS /* unused */); // checked getitem if 
(HasAttrDirect(pyclass, PyStrings::gLen)) { diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Utility.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Utility.cxx index 03eda4eaaedc5..e4cf418b77c02 100644 --- a/bindings/pyroot/cppyy/CPyCppyy/src/Utility.cxx +++ b/bindings/pyroot/cppyy/CPyCppyy/src/Utility.cxx @@ -640,7 +640,7 @@ void CPyCppyy::Utility::ConstructCallbackPreamble(const std::string& retType, << retType << "\"), CPyCppyy::DestroyConverter};\n"; std::vector arg_is_ptr; if (nArgs) { - arg_is_ptr.reserve(nArgs); + arg_is_ptr.resize(nArgs); code << " CPYCPPYY_STATIC std::vector>> argcvs;\n" << " if (argcvs.empty()) {\n" << " argcvs.reserve(" << nArgs << ");\n"; diff --git a/bindings/pyroot/cppyy/cppyy-backend/clingwrapper/src/clingwrapper.cxx b/bindings/pyroot/cppyy/cppyy-backend/clingwrapper/src/clingwrapper.cxx index 0effab90721ec..72322aac644c2 100644 --- a/bindings/pyroot/cppyy/cppyy-backend/clingwrapper/src/clingwrapper.cxx +++ b/bindings/pyroot/cppyy/cppyy-backend/clingwrapper/src/clingwrapper.cxx @@ -257,7 +257,8 @@ class ApplicationStarter { "slice_array", "slice", "stack", "string", "strstream", "strstreambuf", "time_get_byname", "time_get", "time_put_byname", "time_put", "unary_function", "unary_negate", "unique_ptr", "underflow_error", "unordered_map", "unordered_multimap", - "unordered_multiset", "unordered_set", "valarray", "vector", "weak_ptr", "wstring"}; + "unordered_multiset", "unordered_set", "valarray", "vector", "weak_ptr", "wstring", + "__hash_not_enabled"}; for (auto& name : stl_names) gSTLNames.insert(name); diff --git a/bindings/pyroot/cppyy/cppyy/doc/source/changelog.rst b/bindings/pyroot/cppyy/cppyy/doc/source/changelog.rst index 60295fb89f3ed..033f819e17f4f 100644 --- a/bindings/pyroot/cppyy/cppyy/doc/source/changelog.rst +++ b/bindings/pyroot/cppyy/cppyy/doc/source/changelog.rst @@ -18,9 +18,15 @@ master * Improved overload selection for classes with deep hierarchies * Fixed regression when calling static methods with default args on instances * Fixed regression for pickling enums (in global scope only) +* Proper error handling on ``memoryview(array.array('B', []))`` * Auto-cast elements of std::vector, with T a class type * Add a ``Sequence_Check()`` method to the public API * Fix offset calculation of ``std::vector`` datamember on Mac arm +* Extend API to define executor and converter aliases +* Use importlib.metadata instead of pkg_resources for py3.11 and later +* Added out-of-bounds handling for small char-based enums +* Fixes for py3.12 and py3.13 +* Upgrade backend to Clang16 2023-11-15: 3.1.2 diff --git a/bindings/pyroot/cppyy/cppyy/python/cppyy/__init__.py b/bindings/pyroot/cppyy/cppyy/python/cppyy/__init__.py index 957443289d7c0..a7456a0c8b124 100644 --- a/bindings/pyroot/cppyy/cppyy/python/cppyy/__init__.py +++ b/bindings/pyroot/cppyy/cppyy/python/cppyy/__init__.py @@ -305,18 +305,27 @@ def add_library_path(path): if apipath_extra is None: try: - import pkg_resources as pr + if 0x30a0000 <= sys.hexversion: + import importlib.metadata as m - d = pr.get_distribution('CPyCppyy') - for line in d.get_metadata_lines('RECORD'): - if 'API.h' in line: - part = line[0:line.find(',')] + for p in m.files('CPyCppyy'): + if p.match('API.h'): + ape = p.locate() + break + del p, m + else: + import pkg_resources as pr + + d = pr.get_distribution('CPyCppyy') + for line in d.get_metadata_lines('RECORD'): + if 'API.h' in line: + ape = os.path.join(d.location, line[0:line.find(',')]) + break + del line, d, pr - ape = os.path.join(d.location, part) if os.path.exists(ape): 
apipath_extra = os.path.dirname(os.path.dirname(ape)) - - del part, d, pr + del ape except Exception: pass diff --git a/bindings/pyroot/cppyy/cppyy/python/cppyy/_stdcpp_fix.py b/bindings/pyroot/cppyy/cppyy/python/cppyy/_stdcpp_fix.py index 90c3687b41696..0004c87803b72 100644 --- a/bindings/pyroot/cppyy/cppyy/python/cppyy/_stdcpp_fix.py +++ b/bindings/pyroot/cppyy/cppyy/python/cppyy/_stdcpp_fix.py @@ -1,6 +1,6 @@ import sys -# It may be that the interpreter (wether python or pypy-c) was not linked +# It may be that the interpreter (whether python or pypy-c) was not linked # with C++; force its loading before doing anything else (note that not # linking with C++ spells trouble anyway for any C++ libraries ...) if 'linux' in sys.platform and 'GCC' in sys.version: diff --git a/bindings/pyroot/cppyy/cppyy/test/advancedcpp.cxx b/bindings/pyroot/cppyy/cppyy/test/advancedcpp.cxx index 3daa11c14f7d1..23f4c041f69c2 100644 --- a/bindings/pyroot/cppyy/cppyy/test/advancedcpp.cxx +++ b/bindings/pyroot/cppyy/cppyy/test/advancedcpp.cxx @@ -73,12 +73,12 @@ double pass_double_through_const_ref(const double& d) { return d; } // for math conversions testing -bool operator==(const some_comparable& c1, const some_comparable& c2 ) +bool operator==(const some_comparable& c1, const some_comparable& c2) { return &c1 != &c2; // the opposite of a pointer comparison } -bool operator!=( const some_comparable& c1, const some_comparable& c2 ) +bool operator!=(const some_comparable& c1, const some_comparable& c2) { return &c1 == &c2; // the opposite of a pointer comparison } diff --git a/bindings/pyroot/cppyy/cppyy/test/test_datatypes.py b/bindings/pyroot/cppyy/cppyy/test/test_datatypes.py index 710ff5eb1e982..26e2afc21b482 100644 --- a/bindings/pyroot/cppyy/cppyy/test/test_datatypes.py +++ b/bindings/pyroot/cppyy/cppyy/test/test_datatypes.py @@ -1038,10 +1038,10 @@ def test20_object_comparisons_with_cpp__eq__(self): struct Comparable1 { Comparable1(int i) : fInt(i) {} int fInt; - static bool __eq__(const Comparable1& self, const Comparable1& other){ + static bool __eq__(const Comparable1& self, const Comparable1& other) { return self.fInt == other.fInt; } - static bool __ne__(const Comparable1& self, const Comparable1& other){ + static bool __ne__(const Comparable1& self, const Comparable1& other) { return self.fInt != other.fInt; } }; @@ -1049,10 +1049,10 @@ def test20_object_comparisons_with_cpp__eq__(self): struct Comparable2 { Comparable2(int i) : fInt(i) {} int fInt; - bool __eq__(const Comparable2& other){ + bool __eq__(const Comparable2& other) { return fInt == other.fInt; } - bool __ne__(const Comparable2& other){ + bool __ne__(const Comparable2& other) { return fInt != other.fInt; } }; }""") diff --git a/bindings/pyroot/cppyy/cppyy/test/test_fragile.py b/bindings/pyroot/cppyy/cppyy/test/test_fragile.py index 75fce0b1c8dc3..fbacb7bb220fc 100644 --- a/bindings/pyroot/cppyy/cppyy/test/test_fragile.py +++ b/bindings/pyroot/cppyy/cppyy/test/test_fragile.py @@ -528,7 +528,7 @@ def get_errmsg(exc, allspace=allspace): err = get_errmsg(cppdef_exc) assert "FailedtoparsethegivenC++code" in err assert "error:" in err - assert "expectedunqualified-id" in err + assert "invaliddigit" in err assert "1aap=42;" in err def test22_cppexec(self): diff --git a/bindings/pyroot/cppyy/cppyy/test/test_regression.py b/bindings/pyroot/cppyy/cppyy/test/test_regression.py index cf0df68ceb899..6bd0baf387ba4 100644 --- a/bindings/pyroot/cppyy/cppyy/test/test_regression.py +++ b/bindings/pyroot/cppyy/cppyy/test/test_regression.py @@ -9,8 +9,12 
@@ class TestREGRESSION: def setup_class(cls): import cppyy - def stringpager(text, cls=cls): - cls.helpout.append(text) + if sys.hexversion < 0x30d0000: + def stringpager(text, cls=cls): + cls.helpout.append(text) + else: + def stringpager(text, title='', cls=cls): + cls.helpout.append(text) import pydoc pydoc.pager = stringpager diff --git a/bindings/pyroot/cppyy/cppyy/test/test_stltypes.py b/bindings/pyroot/cppyy/cppyy/test/test_stltypes.py index 771cc941bc51e..b42fa0b08b16a 100644 --- a/bindings/pyroot/cppyy/cppyy/test/test_stltypes.py +++ b/bindings/pyroot/cppyy/cppyy/test/test_stltypes.py @@ -1704,9 +1704,9 @@ def test01_deque_byvalue_regression(self): """Return by value of a deque used to crash""" import cppyy - assert cppyy.cppdef("""std::deque f() { + assert cppyy.cppdef("""std::deque emptyf() { std::deque d; d.push_back(0); return d ; }""") - x = cppyy.gbl.f() + x = cppyy.gbl.emptyf() assert x del x diff --git a/bindings/pyroot/cppyy/patches/CPyCppyy-Adapt-to-no-std-in-ROOT.patch b/bindings/pyroot/cppyy/patches/CPyCppyy-Adapt-to-no-std-in-ROOT.patch index 975acf0d3b5f2..df089f1be5bb6 100644 --- a/bindings/pyroot/cppyy/patches/CPyCppyy-Adapt-to-no-std-in-ROOT.patch +++ b/bindings/pyroot/cppyy/patches/CPyCppyy-Adapt-to-no-std-in-ROOT.patch @@ -1,7 +1,7 @@ From 24b94cde0a5fa6b46be05359b7218af9bb295d87 Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Tue, 12 Mar 2024 01:59:37 +0100 -Subject: [PATCH] [CPyCppyy] Adapt to no `std::` in ROOT +Subject: [PATCH 1/2] [CPyCppyy] Adapt to no `std::` in ROOT --- .../pyroot/cppyy/CPyCppyy/src/Converters.cxx | 20 +++++++++++-------- @@ -127,3 +127,58 @@ index c1720cf3f2..ae0e31cac8 100644 -- 2.44.0 +From ef0836c23c850ce3113d5a7ff5787dee9e094099 Mon Sep 17 00:00:00 2001 +From: Aaron Jomy +Date: Tue, 21 Jan 2025 14:09:03 +0100 +Subject: [PATCH 2/2] [PyROOT] Add executors and converters for `std::byte` + +Fixes issue: https://github.com/root-project/root/issues/17442 +--- + bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx | 3 +++ + bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx | 5 +++++ + 2 files changed, 8 insertions(+) + +diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx +index c127604a6e..21d3d4aa73 100644 +--- a/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx ++++ b/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx +@@ -3522,8 +3522,11 @@ public: + gf["const signed char&"] = gf["const char&"]; + #if __cplusplus > 201402L + gf["std::byte"] = gf["uint8_t"]; ++ gf["byte"] = gf["uint8_t"]; + gf["const std::byte&"] = gf["const uint8_t&"]; ++ gf["const byte&"] = gf["const uint8_t&"]; + gf["std::byte&"] = gf["uint8_t&"]; ++ gf["byte&"] = gf["uint8_t&"]; + #endif + gf["std::int8_t"] = gf["int8_t"]; + gf["const std::int8_t&"] = gf["const int8_t&"]; +diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx +index 5e94846771..edefcf5b5b 100644 +--- a/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx ++++ b/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx +@@ -1022,6 +1022,8 @@ public: + #if __cplusplus > 201402L + gf["std::byte ptr"] = (ef_t)+[](cdims_t d) { return new ByteArrayExecutor{d}; }; + gf["const std::byte ptr"] = gf["std::byte ptr"]; ++ gf["byte ptr"] = gf["std::byte ptr"]; ++ gf["const byte ptr"] = gf["std::byte ptr"]; + #endif + gf["int8_t ptr"] = (ef_t)+[](cdims_t d) { return new Int8ArrayExecutor{d}; }; + gf["uint8_t ptr"] = (ef_t)+[](cdims_t d) { return new UInt8ArrayExecutor{d}; }; +@@ -1046,8 +1048,11 @@ public: + 
gf["internal_enum_type_t ptr"] = gf["int ptr"]; + #if __cplusplus > 201402L + gf["std::byte"] = gf["uint8_t"]; ++ gf["byte"] = gf["uint8_t"]; + gf["std::byte&"] = gf["uint8_t&"]; ++ gf["byte&"] = gf["uint8_t&"]; + gf["const std::byte&"] = gf["const uint8_t&"]; ++ gf["const byte&"] = gf["const uint8_t&"]; + #endif + gf["std::int8_t"] = gf["int8_t"]; + gf["std::int8_t&"] = gf["int8_t&"]; +-- +2.43.0 + diff --git a/bindings/pyroot/cppyy/patches/CPyCppyy-Always-convert-returned-std-string.patch b/bindings/pyroot/cppyy/patches/CPyCppyy-Always-convert-returned-std-string.patch index 08a8ac48ea532..2421a72cdf11b 100644 --- a/bindings/pyroot/cppyy/patches/CPyCppyy-Always-convert-returned-std-string.patch +++ b/bindings/pyroot/cppyy/patches/CPyCppyy-Always-convert-returned-std-string.patch @@ -89,8 +89,8 @@ index 3ab4c8b3a1..ae0e31cac8 100644 } +#endif - // This pythonization is disabled for ROOT because it is a bit buggy - #if 0 + if (Cppyy::IsAggregate(((CPPClass*)pyclass)->fCppType) && name.compare(0, 5, "std::", 5) != 0) { + // create a pseudo-constructor to allow initializer-style object creation -- 2.44.0 diff --git a/bindings/pyroot/cppyy/patches/CPyCppyy-Don-t-attempt-to-expose-protected-data-members.patch b/bindings/pyroot/cppyy/patches/CPyCppyy-Don-t-attempt-to-expose-protected-data-members.patch new file mode 100644 index 0000000000000..e373f6b9afa76 --- /dev/null +++ b/bindings/pyroot/cppyy/patches/CPyCppyy-Don-t-attempt-to-expose-protected-data-members.patch @@ -0,0 +1,77 @@ +From 8f54f8c5434ff593b5a3acc3f97e4cd5f0310fdd Mon Sep 17 00:00:00 2001 +From: Jonas Rembser +Date: Thu, 7 Nov 2024 10:19:04 +0100 +Subject: [PATCH] [CPyCppyy] Don't attempt to expose protected data members in + dispatcher + +This mechanism crashes in Python 3.13, and it also didn't work before +with previous Python 3 versions: + +```python +import cppyy + +cppyy.cppdef(""" + +class MyBaseClass { +public: + virtual ~MyBaseClass() = default; +protected: + int protectedFunc() { return 5; } + int _protectedData = 4; +}; + +""") + +class MyDerivedClass(cppyy.gbl.MyBaseClass): + pass + +my_obj = MyDerivedClass() + +print(my_obj.protectedFunc()) # works! +print(my_obj._protectedData) # doesn't work! +``` + +Here is the output with Python 3.8 on lxplus for example: + +```txt +5 +Traceback (most recent call last): + File "/afs/cern.ch/user/r/rembserj/repro.py", line 21, in + print(my_obj._protectedData) # doesn't work! +AttributeError: 'MyDerivedClass' object has no attribute '_protectedData' +``` + +It actually worked in the past before the cppyy upgrade in ROOT 6.32. + +Therefore, there is still a regression that should be fixed. + +However, commenting out the code that now doesn't work anyway still +helps to avoid the crashes in Python 3.13, so this commit suggests to do +this. 
+--- + bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx +index cdef2b8c7b..0fd1705966 100644 +--- a/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx ++++ b/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx +@@ -407,6 +407,7 @@ bool CPyCppyy::InsertDispatcher(CPPScope* klass, PyObject* bases, PyObject* dct, + + // destructor: default is fine + ++#if 0 // doesn't work + // pull in data members that are protected + bool setPublic = false; + for (const auto& binfo : base_infos) { +@@ -426,6 +427,7 @@ bool CPyCppyy::InsertDispatcher(CPPScope* klass, PyObject* bases, PyObject* dct, + } + } + } ++#endif + + // initialize the dispatch pointer for all direct bases that have one + BaseInfos_t::size_type disp_inited = 0; +-- +2.47.0 + diff --git a/bindings/pyroot/cppyy/patches/CPyCppyy-Prevent-construction-of-agg-init-for-tuple.patch b/bindings/pyroot/cppyy/patches/CPyCppyy-Prevent-construction-of-agg-init-for-tuple.patch new file mode 100644 index 0000000000000..eb990aae4ffac --- /dev/null +++ b/bindings/pyroot/cppyy/patches/CPyCppyy-Prevent-construction-of-agg-init-for-tuple.patch @@ -0,0 +1,27 @@ +From 3b62eaa9ec2dfabccca52910d8239af7d9e56c9a Mon Sep 17 00:00:00 2001 +From: maximusron +Date: Sun, 29 Sep 2024 09:32:17 +0200 +Subject: [PATCH] [PyROOT] Prevent construction of aggregate initializer for + std::tuple + +--- + bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx +index b5d5290e46..2196b94ff3 100644 +--- a/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx ++++ b/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx +@@ -1720,7 +1720,8 @@ bool CPyCppyy::Pythonize(PyObject* pyclass, const std::string& name) + } + #endif + +- if (Cppyy::IsAggregate(((CPPClass*)pyclass)->fCppType) && name.compare(0, 5, "std::", 5) != 0) { ++ if (Cppyy::IsAggregate(((CPPClass*)pyclass)->fCppType) && name.compare(0, 5, "std::", 5) != 0 && ++ name.compare(0, 6, "tuple<", 6) != 0) { + // create a pseudo-constructor to allow initializer-style object creation + Cppyy::TCppType_t kls = ((CPPClass*)pyclass)->fCppType; + Cppyy::TCppIndex_t ndata = Cppyy::GetNumDatamembers(kls); +-- +2.47.0 + diff --git a/bindings/pyroot/cppyy/sync-upstream b/bindings/pyroot/cppyy/sync-upstream index 2139890c32418..c5d5c15e36851 100755 --- a/bindings/pyroot/cppyy/sync-upstream +++ b/bindings/pyroot/cppyy/sync-upstream @@ -45,6 +45,7 @@ git apply patches/CPyCppyy-Adapt-to-no-std-in-ROOT.patch git apply patches/CPyCppyy-Always-convert-returned-std-string.patch git apply patches/CPyCppyy-Disable-implicit-conversion-to-smart-ptr.patch git apply patches/CPyCppyy-TString_converter.patch +git apply patches/CPyCppyy-Prevent-construction-of-agg-init-for-tuple.patch git apply patches/cppyy-No-CppyyLegacy-namespace.patch git apply patches/cppyy-Remove-Windows-workaround.patch git apply patches/cppyy-Don-t-enable-cling-autoloading.patch diff --git a/bindings/pyroot/pythonizations/CMakeLists.txt b/bindings/pyroot/pythonizations/CMakeLists.txt index fd19abecfaec6..20f2076e90238 100644 --- a/bindings/pyroot/pythonizations/CMakeLists.txt +++ b/bindings/pyroot/pythonizations/CMakeLists.txt @@ -80,31 +80,44 @@ set(py_sources ROOT/_facade.py ROOT/__init__.py ROOT/_numbadeclare.py + ROOT/_pythonization/__init__.py 
ROOT/_pythonization/_cppinstance.py ROOT/_pythonization/_drawables.py ROOT/_pythonization/_generic.py - ROOT/_pythonization/__init__.py + ROOT/_pythonization/_memory_utils.py ROOT/_pythonization/_pyz_utils.py - ROOT/_pythonization/_rvec.py ROOT/_pythonization/_runtime_error.py + ROOT/_pythonization/_rvec.py ROOT/_pythonization/_stl_vector.py ROOT/_pythonization/_tarray.py ROOT/_pythonization/_tclass.py ROOT/_pythonization/_tclonesarray.py ROOT/_pythonization/_tcollection.py + ROOT/_pythonization/_tcolor.py ROOT/_pythonization/_tcomplex.py ROOT/_pythonization/_tcontext.py - ROOT/_pythonization/_tdirectoryfile.py ROOT/_pythonization/_tdirectory.py - ROOT/_pythonization/_tfile.py + ROOT/_pythonization/_tdirectoryfile.py + ROOT/_pythonization/_tefficiency.py + ROOT/_pythonization/_tentrylist.py + ROOT/_pythonization/_teventlist.py ROOT/_pythonization/_tf1.py + ROOT/_pythonization/_tf2.py + ROOT/_pythonization/_tf3.py + ROOT/_pythonization/_tfile.py + ROOT/_pythonization/_tfilemerger.py + ROOT/_pythonization/_tformula.py ROOT/_pythonization/_tgraph.py + ROOT/_pythonization/_tgraph2d.py ROOT/_pythonization/_th1.py + ROOT/_pythonization/_th2.py + ROOT/_pythonization/_th3.py ROOT/_pythonization/_titer.py ROOT/_pythonization/_tobject.py ROOT/_pythonization/_tobjstring.py ROOT/_pythonization/_tseqcollection.py ROOT/_pythonization/_tstring.py + ROOT/_pythonization/_tstyle.py ROOT/_pythonization/_ttree.py ROOT/_pythonization/_tvector3.py ROOT/_pythonization/_tvectort.py @@ -113,11 +126,9 @@ set(py_sources set(cpp_sources src/PyROOTModule.cxx - src/PyROOTWrapper.cxx src/RPyROOTApplication.cxx src/GenericPyz.cxx src/TClassPyz.cxx - src/TMemoryRegulator.cxx src/TObjectPyz.cxx src/TTreePyz.cxx src/CPPInstancePyz.cxx @@ -182,9 +193,9 @@ endif() # Compile .py files foreach(py_source ${py_sources}) add_custom_command(TARGET ${libname} + POST_BUILD COMMAND ${Python3_EXECUTABLE} -m py_compile ${localruntimedir}/${py_source} COMMAND ${Python3_EXECUTABLE} -O -m py_compile ${localruntimedir}/${py_source} - DEPENDS ${localruntimedir}/${py_source} COMMENT "Compiling PyROOT source ${py_source} for Python ${Python3_VERSION}") endforeach() diff --git a/bindings/pyroot/pythonizations/python/ROOT/__init__.py b/bindings/pyroot/pythonizations/python/ROOT/__init__.py index 211e2001dfdd7..b8b0e04dfa7ad 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/__init__.py +++ b/bindings/pyroot/pythonizations/python/ROOT/__init__.py @@ -184,20 +184,4 @@ def cleanup(): facade.__dict__["app"].keep_polling = False facade.__dict__["app"].process_root_events.join() - if "libROOTPythonizations" in sys.modules: - backend = sys.modules["libROOTPythonizations"] - - # Make sure all the objects regulated by PyROOT are deleted and their - # Python proxies are properly nonified. - backend.ClearProxiedObjects() - - from ROOT import PyConfig - - if PyConfig.ShutDown: - # Hard teardown: run part of the gROOT shutdown sequence. - # Running it here ensures that it is done before any ROOT libraries - # are off-loaded, with unspecified order of static object destruction. 
- backend.gROOT.EndOfProcessCleanups() - - atexit.register(cleanup) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_facade.py b/bindings/pyroot/pythonizations/python/ROOT/_facade.py index 8bb5ff2aa92d9..69a936e271808 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_facade.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_facade.py @@ -8,8 +8,6 @@ import cppyy.ll -from libROOTPythonizations import gROOT - from ._application import PyROOTApplication from ._numbadeclare import _NumbaDeclareDecorator @@ -36,7 +34,7 @@ class _gROOTWrapper(object): def __init__(self, facade): self.__dict__["_facade"] = facade - self.__dict__["_gROOT"] = gROOT + self.__dict__["_gROOT"] = cppyy.gbl.ROOT.GetROOT() def __getattr__(self, name): if name != "SetBatch" and self._facade.__dict__["gROOT"] != self._gROOT: @@ -158,7 +156,7 @@ def _fallback_getattr(self, name): elif hasattr(cppyy.gbl.ROOT, name): return getattr(cppyy.gbl.ROOT, name) else: - res = gROOT.FindObject(name) + res = self.gROOT.FindObject(name) if res: return res raise AttributeError("Failed to get attribute {} from ROOT".format(name)) @@ -204,7 +202,10 @@ def _register_converters_and_executors(self): def _finalSetup(self): # Prevent this method from being re-entered through the gROOT wrapper - self.__dict__["gROOT"] = gROOT + self.__dict__["gROOT"] = cppyy.gbl.ROOT.GetROOT() + + # Make sure the interpreter is initialized once gROOT has been initialized + cppyy.gbl.TInterpreter.Instance() # Setup interactive usage from Python self.__dict__["app"] = PyROOTApplication(self.PyConfig, self._is_ipython) @@ -387,7 +388,7 @@ def TMVA(self): from ._pythonization import _tmva ns = self._fallback_getattr("TMVA") - hasRDF = "dataframe" in gROOT.GetConfigFeatures() + hasRDF = "dataframe" in self.gROOT.GetConfigFeatures() if hasRDF: try: from ._pythonization._tmva import inject_rbatchgenerator, _AsRTensor, SaveXGBoost diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_memory_utils.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_memory_utils.py new file mode 100644 index 0000000000000..350012b2e687d --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_memory_utils.py @@ -0,0 +1,65 @@ +# Author: Vincenzo Eduardo Padulano 12/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ + + +def _should_give_up_ownership(object): + """ + Ownership of objects which automatically register to a directory should be + left to C++, except if the object is gROOT. + """ + import ROOT + + tdir = object.GetDirectory() + return bool(tdir) and tdir is not ROOT.gROOT + + +def _constructor_releasing_ownership(self, *args, **kwargs): + """ + Forward the arguments to the C++ constructor and give up ownership if the + object is attached to a directory, which is then the owner. The only + exception is when the owner is gROOT, to avoid introducing a + backwards-incompatible change. + """ + import ROOT + + self._cpp_constructor(*args, **kwargs) + if _should_give_up_ownership(self): + ROOT.SetOwnership(self, False) + + +def _Clone_releasing_ownership(self, *args, **kwargs): + """ + Analogous to _constructor_releasing_ownership, but for the TObject::Clone() + implementation. 
+ """ + import ROOT + + out = self._Original_Clone(*args, **kwargs) + if _should_give_up_ownership(out): + ROOT.SetOwnership(out, False) + return out + + +def inject_constructor_releasing_ownership(klass): + klass._cpp_constructor = klass.__init__ + klass.__init__ = _constructor_releasing_ownership + +def inject_clone_releasing_ownership(klass): + klass._Original_Clone = klass.Clone + klass.Clone = _Clone_releasing_ownership + + +def _SetDirectory_SetOwnership(self, dir): + self._Original_SetDirectory(dir) + if dir: + # If we are actually registering with a directory, give ownership to C++ + import ROOT + + ROOT.SetOwnership(self, False) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_rdf_pyz.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_rdf_pyz.py index cc0b8c7ec98b7..b3b277b14657a 100755 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_rdf_pyz.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_rdf_pyz.py @@ -332,31 +332,9 @@ def x_more_than_y(x): rdf_node = _handle_cpp_callables(func, rdf._OriginalFilter, func, *_convert_to_vector(args)) if rdf_node is not None: return rdf_node - - jitter = FunctionJitter(rdf) - func.__annotations__['return'] = 'bool' # return type for Filters is bool # Note: You can keep double and Filter still works. - - col_list = [] - filter_name = "" - - if len(args) == 1: - if isinstance(args[0], list): - col_list = args[0] - elif isinstance(args[0], str): - filter_name = args[0] - else: - raise ValueError(f"Argument should be either 'list' or 'str', not {type(args[0]).__name__}.") - - elif len(args) == 2: - if isinstance(args[0], list) and isinstance(args[1], str): - col_list = args[0] - filter_name = args[1] - else: - raise ValueError(f"Arguments should be ('list', 'str',) not ({type(args[0]).__name__,type(args[1]).__name__}.") - - - func_call = jitter.jit_function(func, col_list, extra_args) - return rdf._OriginalFilter("Numba::" + func_call, filter_name) + else: + raise NotImplementedError( + f"Passing callables of type {type(func)} will be supported in future versions of ROOT.") def _PyDefine(rdf, col_name, callable_or_str, cols = [] , extra_args = {} ): """ @@ -403,7 +381,6 @@ def x_scaled(x): rdf_node = _handle_cpp_callables(func, rdf._OriginalDefine, col_name, func, cols) if rdf_node is not None: return rdf_node - - jitter = FunctionJitter(rdf) - func_call = jitter.jit_function(func, cols, extra_args) - return rdf._OriginalDefine(col_name, "Numba::" + func_call) + else: + raise NotImplementedError( + f"Passing callables of type {type(func)} will be supported in future versions of ROOT.") diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/_rooworkspace.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/_rooworkspace.py index 616d0c3984313..ffc2ddc05a1c0 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/_rooworkspace.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/_rooworkspace.py @@ -110,6 +110,13 @@ def __setattr__(self, name, value): raise AttributeError('Resetting the "' + name + '" attribute of a RooWorkspace is not allowed!') object.__setattr__(self, name, value) + def _ipython_key_completions_(self): + r""" + Support tab completion for `__getitem__`, suggesting all components in + the workspace. 
+ """ + return [c.GetName() for c in self.components()] + def RooWorkspace_import(self, *args, **kwargs): r"""The RooWorkspace::import function can't be used in PyROOT because `import` is a reserved python keyword. diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tcolor.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tcolor.py new file mode 100644 index 0000000000000..a4ddfd2411b8c --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tcolor.py @@ -0,0 +1,25 @@ +# Author: Vincenzo Eduardo Padulano CERN 11/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ +from . import pythonization + +def _TColor_constructor(self, *args, **kwargs): + """ + Forward the arguments to the C++ constructor and retain ownership. This + helps avoiding double deletes due to ROOT automatic memory management. + """ + self._cpp_constructor(*args, **kwargs) + import ROOT + ROOT.SetOwnership(self, False) + + +@pythonization("TColor") +def pythonize_tcolor(klass): + klass._cpp_constructor = klass.__init__ + klass.__init__ = _TColor_constructor diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tdirectory.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tdirectory.py index 01ca47eba1251..3331ec5aad9ba 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tdirectory.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tdirectory.py @@ -123,12 +123,21 @@ def _TDirectory_WriteObject(self, obj, *args): return self.WriteObjectAny(obj, type(obj).__cpp_name__, *args) +def _ipython_key_completions_(self): + r""" + Support tab completion for `__getitem__`, suggesting the names of all + objects in the file. + """ + return [k.GetName() for k in self.GetListOfKeys()] + + def pythonize_tdirectory(): klass = cppyy.gbl.TDirectory klass.__getitem__ = _TDirectory_getitem klass.__getattr__ = _TDirectory_getattr klass._WriteObject = klass.WriteObject klass.WriteObject = _TDirectory_WriteObject + klass._ipython_key_completions_ = _ipython_key_completions_ # Instant pythonization (executed at `import ROOT` time), no need of a diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tefficiency.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tefficiency.py new file mode 100644 index 0000000000000..7c9c4c69b9260 --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tefficiency.py @@ -0,0 +1,26 @@ +# Author: Vincenzo Eduardo Padulano 12/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ + +from . 
import pythonization + + +def _SetDirectory_SetOwnership(self, dir): + self._Original_SetDirectory(dir) + if dir: + # If we are actually registering with a directory, give ownership to C++ + import ROOT + ROOT.SetOwnership(self, False) + + +@pythonization("TEfficiency") +def pythonize_tefficiency(klass): + + klass._Original_SetDirectory = klass.SetDirectory + klass.SetDirectory = _SetDirectory_SetOwnership diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tentrylist.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tentrylist.py new file mode 100644 index 0000000000000..06257a49605d4 --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tentrylist.py @@ -0,0 +1,21 @@ +# Author: Vincenzo Eduardo Padulano 12/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ + +from . import pythonization +from ROOT._pythonization._memory_utils import _constructor_releasing_ownership, _SetDirectory_SetOwnership + + +@pythonization("TEntryList") +def pythonize_tentrylist(klass): + klass._cpp_constructor = klass.__init__ + klass.__init__ = _constructor_releasing_ownership + + klass._Original_SetDirectory = klass.SetDirectory + klass.SetDirectory = _SetDirectory_SetOwnership diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_teventlist.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_teventlist.py new file mode 100644 index 0000000000000..0175c74fcebb3 --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_teventlist.py @@ -0,0 +1,21 @@ +# Author: Vincenzo Eduardo Padulano 12/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ + +from . import pythonization +from ROOT._pythonization._memory_utils import _constructor_releasing_ownership, _SetDirectory_SetOwnership + + +@pythonization("TEventList") +def pythonize_tentrylist(klass): + klass._cpp_constructor = klass.__init__ + klass.__init__ = _constructor_releasing_ownership + + klass._Original_SetDirectory = klass.SetDirectory + klass.SetDirectory = _SetDirectory_SetOwnership diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf1.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf1.py index 9aa9fc4883aae..5994b6b20ae00 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf1.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf1.py @@ -98,9 +98,23 @@ def _TF1_EvalPar(self, vars, params): ROOT.Internal.EvalParMultiDim(self, out, x, x_size, nrows, params) return numpy.frombuffer(out, dtype=numpy.float64, count=nrows) + +def _TF1_Constructor(self, *args, **kwargs): + """ + Forward the arguments to the C++ constructor and retain ownership. This + helps avoiding double deletes due to ROOT automatic memory management. 
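Concretely, the intended effect can be sketched as follows (the function name and formula are made up for illustration):

    import ROOT

    f1 = ROOT.TF1("f1", "sin(x)/x", 0.0, 10.0)
    # ROOT keeps its own reference to the function (it typically appears in
    # gROOT.GetListOfFunctions()), so ownership is left on the C++ side and
    # the Python proxy will not delete the object a second time at teardown.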
+ """ + self._cpp_constructor(*args, **kwargs) + import ROOT + ROOT.SetOwnership(self, False) + + @pythonization('TF1') def pythonize_tf1(klass): # Pythonizations for TH1::EvalPar klass._EvalPar = klass.EvalPar klass.EvalPar = _TF1_EvalPar + + klass._cpp_constructor = klass.__init__ + klass.__init__ = _TF1_Constructor diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf2.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf2.py new file mode 100644 index 0000000000000..0ce9220100c00 --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf2.py @@ -0,0 +1,25 @@ +# Author: Vincenzo Eduardo Padulano CERN 11/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ +from . import pythonization + +def _TF2_constructor(self, *args, **kwargs): + """ + Forward the arguments to the C++ constructor and retain ownership. This + helps avoiding double deletes due to ROOT automatic memory management. + """ + self._cpp_constructor(*args, **kwargs) + import ROOT + ROOT.SetOwnership(self, False) + + +@pythonization("TF2") +def pythonize_tf2(klass): + klass._cpp_constructor = klass.__init__ + klass.__init__ = _TF2_constructor diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf3.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf3.py new file mode 100644 index 0000000000000..11f5e8db240f6 --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf3.py @@ -0,0 +1,25 @@ +# Author: Vincenzo Eduardo Padulano CERN 11/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ +from . import pythonization + +def _TF3_constructor(self, *args, **kwargs): + """ + Forward the arguments to the C++ constructor and retain ownership. This + helps avoiding double deletes due to ROOT automatic memory management. + """ + self._cpp_constructor(*args, **kwargs) + import ROOT + ROOT.SetOwnership(self, False) + + +@pythonization("TF3") +def pythonize_tf3(klass): + klass._cpp_constructor = klass.__init__ + klass.__init__ = _TF3_constructor diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tfilemerger.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tfilemerger.py new file mode 100644 index 0000000000000..bfed1cb6829e6 --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tfilemerger.py @@ -0,0 +1,31 @@ +# Author: Giacomo Parolini CERN 12/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ + +from . 
import pythonization + +def _TFileMergerExit(obj, exc_type, exc_val, exc_tb): + """ + Close the merger's output file. + Signature and return value are imposed by Python, see + https://docs.python.org/3/library/stdtypes.html#typecontextmanager + """ + obj.CloseOutputFile() + return False + + +@pythonization('TFileMerger') +def pythonize_tfile_merger(klass): + """ + TFileMerger works as a context manager. + """ + # Pythonization for __enter__ and __exit__ methods + # These make TFileMerger usable in a `with` statement as a context manager + klass.__enter__ = lambda merger: merger + klass.__exit__ = _TFileMergerExit diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tformula.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tformula.py new file mode 100644 index 0000000000000..2bc29dc872979 --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tformula.py @@ -0,0 +1,28 @@ +# Author: Vincenzo Eduardo Padulano CERN 11/2024 +# Author: Jonas Rembser CERN 11/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ +from . import pythonization + + +def _TFormula_Constructor(self, *args, **kwargs): + """ + Forward the arguments to the C++ constructor and retain ownership. This + helps avoiding double deletes due to ROOT automatic memory management. + """ + self._cpp_constructor(*args, **kwargs) + import ROOT + ROOT.SetOwnership(self, False) + + +@pythonization('TFormula') +def pythonize_tformula(klass): + + klass._cpp_constructor = klass.__init__ + klass.__init__ = _TFormula_Constructor diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tgraph2d.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tgraph2d.py new file mode 100644 index 0000000000000..571f9a3225d55 --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tgraph2d.py @@ -0,0 +1,21 @@ +# Author: Vincenzo Eduardo Padulano 12/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ + +from . import pythonization +from ROOT._pythonization._memory_utils import _constructor_releasing_ownership, _SetDirectory_SetOwnership + + +@pythonization("TGraph2D") +def pythonize_tgraph2d(klass): + klass._cpp_constructor = klass.__init__ + klass.__init__ = _constructor_releasing_ownership + + klass._Original_SetDirectory = klass.SetDirectory + klass.SetDirectory = _SetDirectory_SetOwnership diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th1.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th1.py index e8158168bde96..5f40906e53ddc 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th1.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th1.py @@ -9,7 +9,7 @@ ################################################################################ from . 
import pythonization - +from ROOT._pythonization._memory_utils import inject_constructor_releasing_ownership, inject_clone_releasing_ownership, _SetDirectory_SetOwnership # Multiplication by constant @@ -23,6 +23,22 @@ def _imul(self, c): return self +# The constructors need to be pythonized for each derived class separately: +_th1_derived_classes_to_pythonize = [ + "TH1C", + "TH1S", + "TH1I", + "TH1L", + "TH1F", + "TH1D", + "TH1K", + "TProfile", +] + +for klass in _th1_derived_classes_to_pythonize: + pythonization(klass)(inject_constructor_releasing_ownership) + + @pythonization('TH1') def pythonize_th1(klass): # Parameters: @@ -30,3 +46,8 @@ def pythonize_th1(klass): # Support hist *= scalar klass.__imul__ = _imul + + klass._Original_SetDirectory = klass.SetDirectory + klass.SetDirectory = _SetDirectory_SetOwnership + + inject_clone_releasing_ownership(klass) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th2.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th2.py new file mode 100644 index 0000000000000..0e030374eef7a --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th2.py @@ -0,0 +1,31 @@ +# Author: Vincenzo Eduardo Padulano 12/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ + +from . import pythonization +from ROOT._pythonization._memory_utils import inject_constructor_releasing_ownership + + +# The constructors need to be pythonized for each derived class separately: +_th2_derived_classes_to_pythonize = [ + "TH2C", + "TH2S", + "TH2I", + "TH2L", + "TH2F", + "TH2D", + # "TH2Poly", # Derives from TH2 but does not automatically register + # "TH2PolyBin", Does not derive from TH2 + "TProfile2D", + # "TProfile2PolyBin", Derives from TH2PolyBin which does not derive from TH2 + "TProfile2Poly", +] + +for klass in _th2_derived_classes_to_pythonize: + pythonization(klass)(inject_constructor_releasing_ownership) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th3.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th3.py new file mode 100644 index 0000000000000..9776d0489c761 --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th3.py @@ -0,0 +1,28 @@ +# Author: Vincenzo Eduardo Padulano 12/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ + +from . 
import pythonization +from ROOT._pythonization._memory_utils import inject_constructor_releasing_ownership + + +# The constructors need to be pythonized for each derived class separately: +_th3_derived_classes_to_pythonize = [ + # "TGLTH3Composition", Derives from TH3 but does not automatically register + "TH3C", + "TH3S", + "TH3I", + "TH3L", + "TH3F", + "TH3D", + "TProfile3D", +] + +for klass in _th3_derived_classes_to_pythonize: + pythonization(klass)(inject_constructor_releasing_ownership) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/__init__.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/__init__.py index 72c210663d9cf..7e75e92cef032 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/__init__.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/__init__.py @@ -16,8 +16,6 @@ from .. import pythonization -from libROOTPythonizations import gROOT - from ._factory import Factory from ._dataloader import DataLoader from ._crossvalidation import CrossValidation @@ -45,7 +43,7 @@ def inject_rbatchgenerator(ns): from ._gnn import RModel_GNN, RModel_GraphIndependent -hasRDF = "dataframe" in gROOT.GetConfigFeatures() +hasRDF = "dataframe" in cppyy.gbl.ROOT.GetROOT().GetConfigFeatures() if hasRDF: from ._rtensor import get_array_interface, add_array_interface_property, RTensorGetitem, pythonize_rtensor, _AsRTensor diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_batchgenerator.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_batchgenerator.py index 008be762aeec8..6cf046e90a97a 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_batchgenerator.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_batchgenerator.py @@ -1,3 +1,16 @@ +# Author: Dante Niewenhuis, VU Amsterdam 07/2023 +# Author: Kristupas Pranckietis, Vilnius University 05/2024 +# Author: Nopphakorn Subsa-Ard, King Mongkut's University of Technology Thonburi (KMUTT) (TH) 08/2024 +# Author: Vincenzo Eduardo Padulano, CERN 10/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ + from __future__ import annotations from typing import Any, Callable, Tuple, TYPE_CHECKING @@ -12,8 +25,7 @@ class BaseGenerator: def get_template( self, - tree_name: str, - file_name: str, + x_rdf: RNode, columns: list[str] = list(), max_vec_sizes: dict[str, int] = dict(), ) -> Tuple[str, list[int]]: @@ -22,8 +34,7 @@ def get_template( RDataFrame and columns. Args: - file_name (str): name of the root file. - tree_name (str): name of the tree in the root file. + rdataframe (RNode): RDataFrame or RNode object. columns (list[str]): Columns that should be loaded. 
Defaults to loading all columns in the given RDataFrame @@ -33,65 +44,23 @@ def get_template( template (str): Template for the RBatchGenerator """ - # from cppyy.gbl.ROOT import RDataFrame - from ROOT import RDataFrame - - x_rdf = RDataFrame(tree_name, file_name) - if not columns: columns = x_rdf.GetColumnNames() - template_dict = { - "Bool_t": "bool&", - "Double_t": "double&", - "Double32_t": "double&", - "Float_t": "float&", - "Float16_t": "float&", - "Int_t": "int&", - "UInt_t": "unsigned int&", - "Long_t": "long&", - "ULong_t": "unsigned long&", - "Long64_t": "long long&", - "ULong64_t": "unsigned long long&", - "Short_t": "short&", - "UShort_t": "unsigned short&", - - "ROOT::VecOps::RVec": "ROOT::RVec", - "ROOT::VecOps::RVec": "ROOT::RVec", - "ROOT::VecOps::RVec": "ROOT::RVec", - "ROOT::VecOps::RVec": "ROOT::RVec", - "ROOT::VecOps::RVec": "ROOT::RVec", - "ROOT::VecOps::RVec": "ROOT::RVec", - "ROOT::VecOps::RVec": "ROOT::RVec", - "ROOT::VecOps::RVec": "ROOT::RVec", - "ROOT::VecOps::RVec": "ROOT::RVec" - } - template_string = "" self.given_columns = [] self.all_columns = [] - # Get the types of the different columns max_vec_sizes_list = [] for name in columns: name_str = str(name) self.given_columns.append(name_str) - column_type = template_dict[str(x_rdf.GetColumnType(name_str))] - template_string += column_type + "," - - if column_type in [ - "ROOT::RVec", - "ROOT::RVec", - "ROOT::RVec", - "ROOT::RVec", - "ROOT::RVec", - "ROOT::RVec", - "ROOT::RVec", - "ROOT::RVec", - "ROOT::RVec" - ]: + column_type = x_rdf.GetColumnType(name_str) + template_string = f"{template_string}{column_type}," + + if "RVec" in column_type: # Add column for each element if column is a vector if name_str in max_vec_sizes: max_vec_sizes_list.append(max_vec_sizes[name_str]) @@ -111,41 +80,37 @@ def get_template( def __init__( self, - tree_name: str, - file_name: str, + rdataframe: RNode, batch_size: int, chunk_size: int, columns: list[str] = list(), - filters: list[str] = list(), max_vec_sizes: dict[str, int] = dict(), vec_padding: int = 0, - target: str = "", + target: str | list[str] = list(), weights: str = "", validation_split: float = 0.0, max_chunks: int = 0, shuffle: bool = True, + drop_remainder: bool = True, ): """Wrapper around the Cpp RBatchGenerator - Args: - tree_name (str): Name of the tree in the ROOT file - file_name (str): Path to the ROOT file + Args: + rdataframe (RNode): Name of RNode object. batch_size (int): Size of the returned chunks. chunk_size (int): - The size of the chunks loaded from the ROOT file. Higher chunk - size results in better randomization, but higher memory usage. + The size of the chunks loaded from the ROOT file. Higher chunk size + results in better randomization, but also higher memory usage. columns (list[str], optional): Columns to be returned. If not given, all columns are used. - filters (list[str], optional): - Filters to apply during loading. If not given, no filters - are applied. max_vec_sizes (dict[std, int], optional): Size of each column that consists of vectors. Required when using vector based columns. vec_padding (int): Value to pad vectors with if the vector is smaller than the given max vector length. Defaults is 0 - target (str, optional): Column that is used as target. + target (str|list[str], optional): + Column(s) used as target. weights (str, optional): Column used to weight events. Can only be used when a target is given. @@ -158,10 +123,17 @@ def __init__( shuffle (bool): Batches consist of random events and are shuffled every epoch. 
Defaults to True. + drop_remainder (bool): + Drop the remainder of data that is too small to compose full batch. + Defaults to True. """ + import ROOT + from ROOT import RDF + try: import numpy as np + except ImportError: raise ImportError( "Failed to import NumPy during init. NumPy is required when \ @@ -180,45 +152,63 @@ def __init__( given value is {validation_split}" ) - # TODO: better linking when importing into ROOT - # ROOT.gInterpreter.ProcessLine( - # f'#include "{main_folder}Cpp_files/RBatchGenerator.cpp"') + self.noded_rdf = RDF.AsRNode(rdataframe) + + if ROOT.Internal.RDF.GetDataSourceLabel(self.noded_rdf) != "TTreeDS": + raise ValueError( + "RNode object must be created out of TTrees or files of TTree" + ) + + if isinstance(target, str): + target = [target] - self.target_column = target + self.target_columns = target self.weights_column = weights template, max_vec_sizes_list = self.get_template( - tree_name, file_name, columns, max_vec_sizes + rdataframe, columns, max_vec_sizes ) self.num_columns = len(self.all_columns) self.batch_size = batch_size # Handle target - self.target_given = len(self.target_column) > 0 + self.target_given = len(self.target_columns) > 0 + self.weights_given = len(self.weights_column) > 0 if self.target_given: - if target in self.all_columns: - self.target_index = self.all_columns.index(self.target_column) - else: - raise ValueError( - f"Provided target not in given columns: \ntarget => \ - {target}\ncolumns => {self.all_columns}" - ) + for target in self.target_columns: + if target not in self.all_columns: + raise ValueError( + f"Provided target not in given columns: \ntarget => \ + {target}\ncolumns => {self.all_columns}") - # Handle weights - self.weights_given = len(self.weights_column) > 0 - if self.weights_given and not self.target_given: - raise ValueError("Weights can only be used when a target is provided") - if self.weights_given: - if weights in self.all_columns: - self.weights_index = self.all_columns.index(self.weights_column) + self.target_indices = [self.all_columns.index( + target) for target in self.target_columns] + + # Handle weights + if self.weights_given: + if weights in self.all_columns: + self.weights_index = self.all_columns.index( + self.weights_column) + self.train_indices = [c for c in range( + len(self.all_columns)) if c not in self.target_indices+[self.weights_index]] + else: + raise ValueError( + f"Provided weights not in given columns: \nweights => \ + {weights}\ncolumns => {self.all_columns}" + ) else: - raise ValueError( - f"Provided weights not in given columns: \nweights => \ - {weights}\ncolumns => {self.all_columns}" - ) + self.train_indices = [c for c in range( + len(self.all_columns)) if c not in self.target_indices] - self.train_columns = [c for c in self.all_columns if c not in [target, weights]] + elif self.weights_given: + raise ValueError( + "Weights can only be used when a target is provided") + else: + self.train_indices = [c for c in range(len(self.all_columns))] + + self.train_columns = [ + c for c in self.all_columns if c not in self.target_columns+[self.weights_column]] from ROOT import TMVA, EnableThreadSafety @@ -228,28 +218,22 @@ def __init__( # cling via cppyy) and the I/O thread. 
EnableThreadSafety() - expanded_filter = " && ".join(["(" + fltr + ")" for fltr in filters]) - self.generator = TMVA.Experimental.Internal.RBatchGenerator(template)( - tree_name, - file_name, + self.noded_rdf, chunk_size, batch_size, self.given_columns, - expanded_filter, + self.num_columns, max_vec_sizes_list, vec_padding, validation_split, max_chunks, - self.num_columns, shuffle, + drop_remainder, ) atexit.register(self.DeActivate) - def StartValidation(self): - self.generator.StartValidation() - @property def is_active(self): return self.generator.IsActive() @@ -259,7 +243,7 @@ def Activate(self): self.generator.Activate() def DeActivate(self): - """Initialize the generator to be used for a loop""" + """Deactivate the generator""" self.generator.DeActivate() def GetSample(self): @@ -281,14 +265,25 @@ def GetSample(self): return np.zeros((self.batch_size, self.num_columns)) if not self.weights_given: + if len(self.target_indices) == 1: + return np.zeros((self.batch_size, self.num_columns - 1)), np.zeros( + (self.batch_size)).reshape(-1, 1) + return np.zeros((self.batch_size, self.num_columns - 1)), np.zeros( - (self.batch_size) + (self.batch_size, len(self.target_indices)) + ) + + if len(self.target_indices) == 1: + return ( + np.zeros((self.batch_size, self.num_columns - 2)), + np.zeros((self.batch_size)).reshape(-1, 1), + np.zeros((self.batch_size)).reshape(-1, 1), ) return ( np.zeros((self.batch_size, self.num_columns - 2)), - np.zeros((self.batch_size)), - np.zeros((self.batch_size)), + np.zeros((self.batch_size, len(self.target_indices))), + np.zeros((self.batch_size)).reshape(-1, 1), ) def ConvertBatchToNumpy(self, batch: "RTensor") -> np.ndarray: @@ -306,35 +301,30 @@ def ConvertBatchToNumpy(self, batch: "RTensor") -> np.ndarray: raise ImportError("Failed to import numpy in batchgenerator init") data = batch.GetData() - data.reshape((self.batch_size * self.num_columns,)) + batch_size, num_columns = tuple(batch.GetShape()) - return_data = np.array(data).reshape(self.batch_size, self.num_columns) + data.reshape((batch_size * num_columns,)) - # Splice target column from the data if weight is given + return_data = np.asarray(data).reshape(batch_size, num_columns) + + # Splice target column from the data if target is given if self.target_given: - target_data = return_data[:, self.target_index] - return_data = np.column_stack( - ( - return_data[:, : self.target_index], - return_data[:, self.target_index + 1 :], - ) - ) + train_data = return_data[:, self.train_indices] + target_data = return_data[:, self.target_indices] - # Splice weights column from the data if weight is given + # Splice weight column from the data if weight is given if self.weights_given: - if self.target_index < self.weights_index: - self.weights_index -= 1 - weights_data = return_data[:, self.weights_index] - return_data = np.column_stack( - ( - return_data[:, : self.weights_index], - return_data[:, self.weights_index + 1 :], - ) - ) - return return_data, target_data, weights_data - return return_data, target_data + if len(self.target_indices) == 1: + return train_data, target_data.reshape(-1, 1), weights_data.reshape(-1, 1) + + return train_data, target_data, weights_data.reshape(-1, 1) + + if len(self.target_indices) == 1: + return train_data, target_data.reshape(-1, 1) + + return train_data, target_data return return_data @@ -348,58 +338,77 @@ def ConvertBatchToPyTorch(self, batch: Any) -> torch.Tensor: torch.Tensor: converted batch """ import torch + import numpy as np data = batch.GetData() - 
data.reshape((self.batch_size * self.num_columns,)) + batch_size, num_columns = tuple(batch.GetShape()) - return_data = torch.Tensor(data).reshape(self.batch_size, self.num_columns) + data.reshape((batch_size * num_columns,)) - # Splice target column from the data if weight is given + return_data = torch.as_tensor(np.asarray(data)).reshape( + batch_size, num_columns) + + # Splice target column from the data if target is given if self.target_given: - target_data = return_data[:, self.target_index] - return_data = torch.column_stack( - ( - return_data[:, : self.target_index], - return_data[:, self.target_index + 1 :], - ) - ) + train_data = return_data[:, self.train_indices] + target_data = return_data[:, self.target_indices] - # Splice weights column from the data if weight is given + # Splice weight column from the data if weight is given if self.weights_given: - if self.target_index < self.weights_index: - self.weights_index -= 1 - weights_data = return_data[:, self.weights_index] - return_data = torch.column_stack( - ( - return_data[:, : self.weights_index], - return_data[:, self.weights_index + 1 :], - ) - ) - return return_data, target_data, weights_data - return return_data, target_data + if len(self.target_indices) == 1: + return train_data, target_data.reshape(-1, 1), weights_data.reshape(-1, 1) + + return train_data, target_data, weights_data.reshape(-1, 1) + + if len(self.target_indices) == 1: + return train_data, target_data.reshape(-1, 1) + + return train_data, target_data return return_data - def ConvertBatchToTF(self, batch: Any) -> np.ndarray: + def ConvertBatchToTF(self, batch: Any) -> Any: """ - PLACEHOLDER: at this moment this function only calls the - ConvertBatchToNumpy function. In the Future this function can be - used to convert to TF tensors directly + Convert a RTensor into a TensorFlow tensor Args: batch (RTensor): Batch returned from the RBatchGenerator Returns: - np.ndarray: converted batch + tensorflow.Tensor: converted batch """ - # import tensorflow as tf + import tensorflow as tf + + data = batch.GetData() + batch_size, num_columns = tuple(batch.GetShape()) - batch = self.ConvertBatchToNumpy(batch) + data.reshape((batch_size * num_columns,)) - # TODO: improve this by returning tensorflow tensors - return batch + return_data = tf.constant(data, shape=(batch_size, num_columns)) + + if batch_size != self.batch_size: + return_data = tf.pad(return_data, tf.constant( + [[0, self.batch_size - batch_size], [0, 0]])) + + # Splice target column from the data if weight is given + if self.target_given: + train_data = tf.gather( + return_data, indices=self.train_indices, axis=1) + target_data = tf.gather( + return_data, indices=self.target_indices, axis=1) + + # Splice weight column from the data if weight is given + if self.weights_given: + weights_data = tf.gather(return_data, indices=[ + self.weights_index], axis=1) + + return train_data, target_data, weights_data + + return train_data, target_data + + return return_data # Return a batch when available def GetTrainBatch(self) -> Any: @@ -478,13 +487,21 @@ def train_columns(self) -> list[str]: return self.base_generator.train_columns @property - def target_column(self) -> str: - return self.base_generator.target_column + def target_columns(self) -> str: + return self.base_generator.target_columns @property def weights_column(self) -> str: return self.base_generator.weights_column + @property + def number_of_batches(self) -> int: + return self.base_generator.generator.NumberOfTrainingBatches() + + @property + def 
last_batch_no_of_rows(self) -> int: + return self.base_generator.generator.TrainRemainderRows() + def __iter__(self): self._callable = self.__call__() @@ -509,7 +526,7 @@ def __call__(self) -> Any: while True: batch = self.base_generator.GetTrainBatch() - if not batch: + if batch is None: break yield self.conversion_function(batch) @@ -542,13 +559,21 @@ def train_columns(self) -> list[str]: return self.base_generator.train_columns @property - def target_column(self) -> str: - return self.base_generator.target_column + def target_columns(self) -> str: + return self.base_generator.target_columns @property def weights_column(self) -> str: return self.base_generator.weights_column + @property + def number_of_batches(self) -> int: + return self.base_generator.generator.NumberOfValidationBatches() + + @property + def last_batch_no_of_rows(self) -> int: + return self.base_generator.generator.ValidationRemainderRows() + def __iter__(self): self._callable = self.__call__() @@ -571,8 +596,6 @@ def __call__(self) -> Any: if self.base_generator.is_active: self.base_generator.DeActivate() - self.base_generator.StartValidation() - while True: batch = self.base_generator.GetValidationBatch() @@ -583,41 +606,37 @@ def __call__(self) -> Any: def CreateNumPyGenerators( - tree_name: str, - file_name: str, + rdataframe: RNode, batch_size: int, chunk_size: int, columns: list[str] = list(), - filters: list[str] = list(), max_vec_sizes: dict[str, int] = dict(), vec_padding: int = 0, - target: str = "", + target: str | list[str] = list(), weights: str = "", validation_split: float = 0.0, max_chunks: int = 0, shuffle: bool = True, + drop_remainder=True, ) -> Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]: """ - Return two batch generators based on the given ROOT file and tree. + Return two batch generators based on the given ROOT file and tree or RDataFrame The first generator returns training batches, while the second generator returns validation batches Args: - tree_name (str): Name of the tree in the ROOT file - file_name (str): Path to the ROOT file + rdataframe (RNode): Name of RNode object. batch_size (int): Size of the returned chunks. chunk_size (int): The size of the chunks loaded from the ROOT file. Higher chunk size results in better randomization, but also higher memory usage. columns (list[str], optional): Columns to be returned. If not given, all columns are used. - filters (list[str], optional): - Filters to apply. If not given, no filters are applied. max_vec_sizes (list[int], optional): Size of each column that consists of vectors. Required when using vector based columns - target (str, optional): - Column that is used as target. + target (str|list[str], optional): + Column(s) used as target. weights (str, optional): Column used to weight events. Can only be used when a target is given @@ -628,22 +647,36 @@ def CreateNumPyGenerators( The number of chunks that should be loaded for an epoch. If not given, the whole file is used shuffle (bool): - randomize the training batches every epoch. Defaults to True + randomize the training batches every epoch. + Defaults to True + drop_remainder (bool): + Drop the remainder of data that is too small to compose full batch. + Defaults to True. + Let a data list [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] with batch_size=4 be + given. + If drop_remainder = True, then two batches [0, 1, 2, 3] and + [4, 5, 6, 7] will be returned. + If drop_remainder = False, then three batches [0, 1, 2, 3], + [4, 5, 6, 7] and [8, 9] will be returned. 
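For example, a hedged usage sketch with the new RDataFrame-based signature (file, tree and column names are assumptions made for illustration):

    import ROOT

    df = ROOT.RDataFrame("events", "train.root")   # must be backed by a TTree
    train_gen, val_gen = ROOT.TMVA.Experimental.CreateNumPyGenerators(
        df,
        batch_size=128,
        chunk_size=5000,
        target=["label"],          # target may now be a list of columns
        validation_split=0.2,
        drop_remainder=True,
    )
    for x, y in train_gen:         # x: features, y: targets, as NumPy arrays
        break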
Returns: - Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]: - Two generators are returned. One used to load training batches, - and one to load validation batches. NOTE: the validation batches - are loaded during the training. Before training, the validation - generator will return no batches. + TrainRBatchGenerator or + Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]: + If validation split is 0, return TrainBatchGenerator. + + Otherwise two generators are returned. One used to load training + batches, and one to load validation batches. NOTE: the validation + batches are loaded during the training. Before training, the + validation generator will return no batches. """ + + import numpy as np + base_generator = BaseGenerator( - tree_name, - file_name, + rdataframe, batch_size, chunk_size, columns, - filters, max_vec_sizes, vec_padding, target, @@ -651,11 +684,16 @@ def CreateNumPyGenerators( validation_split, max_chunks, shuffle, + drop_remainder, ) train_generator = TrainRBatchGenerator( base_generator, base_generator.ConvertBatchToNumpy ) + + if validation_split == 0.0: + return train_generator, None + validation_generator = ValidationRBatchGenerator( base_generator, base_generator.ConvertBatchToNumpy ) @@ -664,41 +702,37 @@ def CreateNumPyGenerators( def CreateTFDatasets( - tree_name: str, - file_name: str, + rdataframe: RNode, batch_size: int, chunk_size: int, columns: list[str] = list(), - filters: list[str] = list(), max_vec_sizes: dict[str, int] = dict(), vec_padding: int = 0, - target: str = "", + target: str | list[str] = list(), weights: str = "", validation_split: float = 0.0, max_chunks: int = 0, shuffle: bool = True, + drop_remainder=True, ) -> Tuple[tf.data.Dataset, tf.data.Dataset]: """ - Return two Tensorflow Datasets based on the given ROOT file and tree + Return two Tensorflow Datasets based on the given ROOT file and tree or RDataFrame The first generator returns training batches, while the second generator returns validation batches Args: - tree_name (str): Name of the tree in the ROOT file - file_name (str): Path to the ROOT file + rdataframe (RNode): Name of RNode object. batch_size (int): Size of the returned chunks. chunk_size (int): The size of the chunks loaded from the ROOT file. Higher chunk size results in better randomization, but also higher memory usage. columns (list[str], optional): Columns to be returned. If not given, all columns are used. - filters (list[str], optional): - Filters to apply. If not given, no filters are applied. max_vec_sizes (list[int], optional): Size of each column that consists of vectors. Required when using vector based columns - target (str, optional): - Column that is used as target. + target (str|list[str], optional): + Column(s) used as target. weights (str, optional): Column used to weight events. Can only be used when a target is given @@ -709,24 +743,35 @@ def CreateTFDatasets( The number of chunks that should be loaded for an epoch. If not given, the whole file is used shuffle (bool): - randomize the training batches every epoch. Defaults to True + randomize the training batches every epoch. + Defaults to True + drop_remainder (bool): + Drop the remainder of data that is too small to compose full batch. + Defaults to True. + Let a data list [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] with batch_size=4 be + given. + If drop_remainder = True, then two batches [0, 1, 2, 3] and + [4, 5, 6, 7] will be returned. + If drop_remainder = False, then three batches [0, 1, 2, 3], + [4, 5, 6, 7] and [8, 9] will be returned. 
Returns: - Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]: - Two generators are returned. One used to load training batches, - and one to load validation batches. NOTE: the validation batches - are loaded during the training. Before training, the validation - generator will return no batches. + TrainRBatchGenerator or + Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]: + If validation split is 0, return TrainBatchGenerator. + + Otherwise two generators are returned. One used to load training + batches, and one to load validation batches. NOTE: the validation + batches are loaded during the training. Before training, the + validation generator will return no batches. """ import tensorflow as tf base_generator = BaseGenerator( - tree_name, - file_name, + rdataframe, batch_size, chunk_size, columns, - filters, max_vec_sizes, vec_padding, target, @@ -734,6 +779,7 @@ def CreateTFDatasets( validation_split, max_chunks, shuffle, + drop_remainder, ) train_generator = TrainRBatchGenerator( @@ -743,27 +789,32 @@ def CreateTFDatasets( base_generator, base_generator.ConvertBatchToTF ) - num_columns = len(train_generator.train_columns) + num_train_columns = len(train_generator.train_columns) + num_target_columns = len(train_generator.target_columns) # No target and weights given if target == "": batch_signature = tf.TensorSpec( - shape=(batch_size, num_columns), dtype=tf.float32 + shape=(batch_size, num_train_columns), dtype=tf.float32 ) # Target given, no weights given elif weights == "": batch_signature = ( - tf.TensorSpec(shape=(batch_size, num_columns), dtype=tf.float32), - tf.TensorSpec(shape=(batch_size,), dtype=tf.float32), + tf.TensorSpec(shape=(batch_size, num_train_columns), + dtype=tf.float32), + tf.TensorSpec(shape=(batch_size, num_target_columns), + dtype=tf.float32), ) # Target and weights given else: batch_signature = ( - tf.TensorSpec(shape=(batch_size, num_columns), dtype=tf.float32), - tf.TensorSpec(shape=(batch_size,), dtype=tf.float32), - tf.TensorSpec(shape=(batch_size,), dtype=tf.float32), + tf.TensorSpec(shape=(batch_size, num_train_columns), + dtype=tf.float32), + tf.TensorSpec(shape=(batch_size, num_target_columns), + dtype=tf.float32), + tf.TensorSpec(shape=(batch_size, 1), dtype=tf.float32), ) ds_train = tf.data.Dataset.from_generator( @@ -773,8 +824,12 @@ def CreateTFDatasets( # Give access to the columns function of the training set setattr(ds_train, "columns", train_generator.columns) setattr(ds_train, "train_columns", train_generator.train_columns) - setattr(ds_train, "target_column", train_generator.target_column) + setattr(ds_train, "target_column", train_generator.target_columns) setattr(ds_train, "weights_column", train_generator.weights_column) + setattr(ds_train, "number_of_batches", train_generator.number_of_batches) + + if validation_split == 0.0: + return ds_train ds_validation = tf.data.Dataset.from_generator( validation_generator, output_signature=batch_signature @@ -783,48 +838,46 @@ def CreateTFDatasets( # Give access to the columns function of the validation set setattr(ds_validation, "columns", train_generator.columns) setattr(ds_validation, "train_columns", train_generator.train_columns) - setattr(ds_validation, "target_column", train_generator.target_column) + setattr(ds_validation, "target_column", train_generator.target_columns) setattr(ds_validation, "weights_column", train_generator.weights_column) + setattr(ds_validation, "number_of_batches", + validation_generator.number_of_batches) return ds_train, ds_validation def 
CreatePyTorchGenerators( - tree_name: str, - file_name: str, + rdataframe: RNode, batch_size: int, chunk_size: int, columns: list[str] = list(), - filters: list[str] = list(), max_vec_sizes: dict[str, int] = dict(), vec_padding: int = 0, - target: str = "", + target: str | list[str] = list(), weights: str = "", validation_split: float = 0.0, max_chunks: int = 0, shuffle: bool = True, + drop_remainder=True, ) -> Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]: """ - Return two Tensorflow Datasets based on the given ROOT file and tree + Return two Tensorflow Datasets based on the given ROOT file and tree or RDataFrame The first generator returns training batches, while the second generator returns validation batches Args: - tree_name (str): Name of the tree in the ROOT file - file_name (str): Path to the ROOT file + rdataframe (RNode): Name of RNode object. batch_size (int): Size of the returned chunks. chunk_size (int): The size of the chunks loaded from the ROOT file. Higher chunk size results in better randomization, but also higher memory usage. columns (list[str], optional): Columns to be returned. If not given, all columns are used. - filters (list[str], optional): - Filters to apply. If not given, no filters are applied. max_vec_sizes (list[int], optional): Size of each column that consists of vectors. Required when using vector based columns - target (str, optional): - Column that is used as target. + target (str|list[str], optional): + Column(s) used as target. weights (str, optional): Column used to weight events. Can only be used when a target is given @@ -835,22 +888,33 @@ def CreatePyTorchGenerators( The number of chunks that should be loaded for an epoch. If not given, the whole file is used shuffle (bool): - randomize the training batches every epoch. Defaults to True + randomize the training batches every epoch. + Defaults to True + drop_remainder (bool): + Drop the remainder of data that is too small to compose full batch. + Defaults to True. + Let a data list [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] with batch_size=4 be + given. + If drop_remainder = True, then two batches [0, 1, 2, 3] and + [4, 5, 6, 7] will be returned. + If drop_remainder = False, then three batches [0, 1, 2, 3], + [4, 5, 6, 7] and [8, 9] will be returned. Returns: - Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]: - Two generators are returned. One used to load training batches, - and one to load validation batches. NOTE: the validation batches - are loaded during the training. Before training, the validation - generator will return no batches. + TrainRBatchGenerator or + Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]: + If validation split is 0, return TrainBatchGenerator. + + Otherwise two generators are returned. One used to load training + batches, and one to load validation batches. NOTE: the validation + batches are loaded during the training. Before training, the + validation generator will return no batches. 
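The difference in return shape can be sketched as follows (the input names are assumptions, as in the NumPy sketch above):

    import ROOT

    df = ROOT.RDataFrame("events", "train.root")   # hypothetical TTree-backed input

    # validation_split left at 0.0: only a training generator is returned
    train_only = ROOT.TMVA.Experimental.CreatePyTorchGenerators(
        df, batch_size=128, chunk_size=5000, target="label")

    # non-zero validation_split: a (train, validation) pair is returned
    train_gen, val_gen = ROOT.TMVA.Experimental.CreatePyTorchGenerators(
        df, batch_size=128, chunk_size=5000, target="label",
        validation_split=0.3)
    x, y = next(iter(train_gen))   # torch tensors for one batch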
""" base_generator = BaseGenerator( - tree_name, - file_name, + rdataframe, batch_size, chunk_size, columns, - filters, max_vec_sizes, vec_padding, target, @@ -858,11 +922,16 @@ def CreatePyTorchGenerators( validation_split, max_chunks, shuffle, + drop_remainder, ) train_generator = TrainRBatchGenerator( base_generator, base_generator.ConvertBatchToPyTorch ) + + if validation_split == 0.0: + return train_generator + validation_generator = ValidationRBatchGenerator( base_generator, base_generator.ConvertBatchToPyTorch ) diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tstyle.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tstyle.py new file mode 100644 index 0000000000000..aed3a901a5aa9 --- /dev/null +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tstyle.py @@ -0,0 +1,29 @@ +# Author: Jonas Rembser CERN 11/2024 + +################################################################################ +# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. # +# All rights reserved. # +# # +# For the licensing terms see $ROOTSYS/LICENSE. # +# For the list of contributors see $ROOTSYS/README/CREDITS. # +################################################################################ + +from . import pythonization + + +def _TStyle_Constructor(self, *args, **kwargs): + """ + Forward the arguments to the C++ constructor and retain ownership. This + helps avoiding double deletes due to ROOT automatic memory management. + """ + self._cpp_constructor(*args, **kwargs) + import ROOT + + ROOT.SetOwnership(self, False) + + +@pythonization("TStyle") +def pythonize_tstyle(klass): + + klass._cpp_constructor = klass.__init__ + klass.__init__ = _TStyle_Constructor diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_ttree.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_ttree.py index 1957db444c843..36a4b53f97409 100644 --- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_ttree.py +++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_ttree.py @@ -133,7 +133,7 @@ from libROOTPythonizations import GetBranchAttr, BranchPyz from ._rvec import _array_interface_dtype_map, _get_cpp_type_from_numpy_type from . import pythonization - +from ROOT._pythonization._memory_utils import _should_give_up_ownership, _constructor_releasing_ownership, _SetDirectory_SetOwnership # TTree iterator def _TTree__iter__(self): @@ -279,6 +279,18 @@ def _TTree__getattr__(self, key): out = cppyy.ll.cast[cast_type](out) return out +def _TTree_CloneTree(self, *args, **kwargs): + """ + Forward the arguments to the C++ function and give up ownership if the + TTree is attached to a TFile, which is the owner in that case. + """ + import ROOT + + out_tree = self._CloneTree(*args, **kwargs) + if _should_give_up_ownership(out_tree): + ROOT.SetOwnership(out_tree, False) + + return out_tree @pythonization("TTree") def pythonize_ttree(klass, name): @@ -286,6 +298,14 @@ def pythonize_ttree(klass, name): # klass: class to be pythonized # name: string containing the name of the class + # Functions that need to drop the ownership if the current directory is a TFile + + klass._cpp_constructor = klass.__init__ + klass.__init__ = _constructor_releasing_ownership + + klass._CloneTree = klass.CloneTree + klass.CloneTree = _TTree_CloneTree + # Pythonizations that are common to TTree and its subclasses. 
# To avoid duplicating the same logic in the pythonizors of # the subclasses, inject the pythonizations for all the target @@ -305,6 +325,9 @@ def pythonize_ttree(klass, name): klass._OriginalBranch = klass.Branch klass.Branch = _Branch + klass._Original_SetDirectory = klass.SetDirectory + klass.SetDirectory = _SetDirectory_SetOwnership + @pythonization("TChain") def pythonize_tchain(klass): @@ -321,3 +344,10 @@ def pythonize_tchain(klass): # SetBranchAddress klass._OriginalSetBranchAddress = klass.SetBranchAddress klass.SetBranchAddress = _SetBranchAddress + +@pythonization("TNtuple") +def pythonize_tchain(klass): + + # The constructor needs to be explicitly pythonized for derived classes. + klass._cpp_constructor = klass.__init__ + klass.__init__ = _constructor_releasing_ownership diff --git a/bindings/pyroot/pythonizations/src/PyROOTModule.cxx b/bindings/pyroot/pythonizations/src/PyROOTModule.cxx index 070176e18ffdb..efed441222db5 100644 --- a/bindings/pyroot/pythonizations/src/PyROOTModule.cxx +++ b/bindings/pyroot/pythonizations/src/PyROOTModule.cxx @@ -11,7 +11,6 @@ // Bindings #include "PyROOTPythonize.h" -#include "PyROOTWrapper.h" #include "RPyROOTApplication.h" // Cppyy @@ -20,6 +19,7 @@ #include "../../cppyy/CPyCppyy/src/ProxyWrappers.h" // ROOT +#include "TInterpreter.h" #include "TROOT.h" #include "TSystem.h" #include "RConfigure.h" @@ -82,8 +82,6 @@ static PyMethodDef gPyROOTMethods[] = { (char *)"Install an input hook to process GUI events"}, {(char *)"_CPPInstance__expand__", (PyCFunction)PyROOT::CPPInstanceExpand, METH_VARARGS, (char *)"Deserialize a pickled object"}, - {(char *)"ClearProxiedObjects", (PyCFunction)PyROOT::ClearProxiedObjects, METH_NOARGS, - (char *)"Clear proxied objects regulated by PyROOT"}, {(char *)"JupyROOTExecutor", (PyCFunction)JupyROOTExecutor, METH_VARARGS, (char *)"Create JupyROOTExecutor"}, {(char *)"JupyROOTDeclarer", (PyCFunction)JupyROOTDeclarer, METH_VARARGS, (char *)"Create JupyROOTDeclarer"}, {(char *)"JupyROOTExecutorHandler_Clear", (PyCFunction)JupyROOTExecutorHandler_Clear, METH_NOARGS, @@ -146,8 +144,13 @@ extern "C" PyObject *PyInit_libROOTPythonizations() // keep gRootModule, but do not increase its reference count even as it is borrowed, // or a self-referencing cycle would be created - // setup PyROOT - PyROOT::Init(); + // Initialize and acquire the GIL to allow for threading in ROOT +#if PY_VERSION_HEX < 0x03090000 + PyEval_InitThreads(); +#endif + + // Make sure the interpreter is initialized once gROOT has been initialized + TInterpreter::Instance(); // signal policy: don't abort interpreter in interactive mode CallContext::SetGlobalSignalPolicy(!gROOT->IsBatch()); diff --git a/bindings/pyroot/pythonizations/src/PyROOTWrapper.cxx b/bindings/pyroot/pythonizations/src/PyROOTWrapper.cxx deleted file mode 100644 index 42c3988a30f56..0000000000000 --- a/bindings/pyroot/pythonizations/src/PyROOTWrapper.cxx +++ /dev/null @@ -1,74 +0,0 @@ -// Author: Enric Tejedor CERN 06/2018 -// Original PyROOT code by Wim Lavrijsen, LBL - -/************************************************************************* - * Copyright (C) 1995-2018, Rene Brun and Fons Rademakers. * - * All rights reserved. * - * * - * For the licensing terms see $ROOTSYS/LICENSE. * - * For the list of contributors see $ROOTSYS/README/CREDITS. 
* - *************************************************************************/ - -// Bindings -#include "PyROOTWrapper.h" -#include "TMemoryRegulator.h" - -// Cppyy -#include "CPyCppyy/API.h" - -// ROOT -#include "TROOT.h" -#include "TSystem.h" -#include "TClass.h" -#include "TInterpreter.h" -#include "DllImport.h" - -namespace PyROOT { -R__EXTERN PyObject *gRootModule; -} - -using namespace PyROOT; - -namespace { - -static void AddToGlobalScope(const char *label, TObject *obj, const char *classname) -{ - // Bind the given object with the given class in the global scope with the - // given label for its reference. - PyModule_AddObject(gRootModule, label, CPyCppyy::Instance_FromVoidPtr(obj, classname)); -} - -} // unnamed namespace - -PyROOT::RegulatorCleanup &GetRegulatorCleanup() -{ - // The object is thread-local because it can happen that we call into - // C++ code (from the PyROOT CPython extension, from CPyCppyy or from cling) - // from different Python threads. A notable example is within a distributed - // RDataFrame application running on Dask. - thread_local PyROOT::RegulatorCleanup m; - return m; -} - -void PyROOT::Init() -{ - // Initialize and acquire the GIL to allow for threading in ROOT -#if PY_VERSION_HEX < 0x03090000 - PyEval_InitThreads(); -#endif - - // Memory management - gROOT->GetListOfCleanups()->Add(&GetRegulatorCleanup()); - - // Bind ROOT globals that will be needed in ROOT.py - AddToGlobalScope("gROOT", gROOT, gROOT->IsA()->GetName()); - AddToGlobalScope("gSystem", gSystem, gSystem->IsA()->GetName()); - AddToGlobalScope("gInterpreter", gInterpreter, gInterpreter->IsA()->GetName()); -} - -PyObject *PyROOT::ClearProxiedObjects(PyObject * /* self */, PyObject * /* args */) -{ - // Delete all memory-regulated objects - GetRegulatorCleanup().CallClearProxiedObjects(); - Py_RETURN_NONE; -} diff --git a/bindings/pyroot/pythonizations/src/TMemoryRegulator.cxx b/bindings/pyroot/pythonizations/src/TMemoryRegulator.cxx deleted file mode 100644 index 4502d826b2bab..0000000000000 --- a/bindings/pyroot/pythonizations/src/TMemoryRegulator.cxx +++ /dev/null @@ -1,108 +0,0 @@ - -// Author: Enric Tejedor CERN 08/2019 -// Author: Vincenzo Eduardo Padulano CERN 05/2024 - -/************************************************************************* - * Copyright (C) 1995-2019, Rene Brun and Fons Rademakers. * - * All rights reserved. * - * * - * For the licensing terms see $ROOTSYS/LICENSE. * - * For the list of contributors see $ROOTSYS/README/CREDITS. * - *************************************************************************/ - -#include "TMemoryRegulator.h" - -#include "../../cppyy/CPyCppyy/src/ProxyWrappers.h" -#include "../../cppyy/CPyCppyy/src/CPPInstance.h" - -//////////////////////////////////////////////////////////////////////////// -/// \brief Constructor. Registers the hooks to run on Cppyy's object -/// construction and destruction -PyROOT::TMemoryRegulator::TMemoryRegulator() -{ - CPyCppyy::MemoryRegulator::SetRegisterHook( - [this](Cppyy::TCppObject_t cppobj, Cppyy::TCppType_t klass) { return this->RegisterHook(cppobj, klass); }); - CPyCppyy::MemoryRegulator::SetUnregisterHook( - [this](Cppyy::TCppObject_t cppobj, Cppyy::TCppType_t klass) { return this->UnregisterHook(cppobj, klass); }); -} - -//////////////////////////////////////////////////////////////////////////// -/// \brief Register a hook that Cppyy runs when constructing an object. -/// \param[in] cppobj Address of the object. -/// \param[in] klass Class id of the object. -/// \return Pair of two booleans. 
First indicates success, second tells -/// Cppyy if we want to continue running RegisterPyObject -std::pair<bool, bool> PyROOT::TMemoryRegulator::RegisterHook(Cppyy::TCppObject_t cppobj, Cppyy::TCppType_t klass) -{ - static Cppyy::TCppType_t tobjectTypeID = (Cppyy::TCppType_t)Cppyy::GetScope("TObject"); - - if (Cppyy::IsSubtype(klass, tobjectTypeID)) { - fObjectMap.insert({cppobj, klass}); - } - - return {true, true}; -} - -//////////////////////////////////////////////////////////////////////////// -/// \brief Register a hook that Cppyy runs when deleting an object. -/// \param[in] cppobj Address of the object. -/// \param[in] klass Class id of the object. -/// \return Pair of two booleans. First indicates success, second tells -/// Cppyy if we want to continue running UnRegisterPyObject -std::pair<bool, bool> PyROOT::TMemoryRegulator::UnregisterHook(Cppyy::TCppObject_t cppobj, Cppyy::TCppType_t klass) -{ - - static Cppyy::TCppType_t tobjectTypeID = (Cppyy::TCppType_t)Cppyy::GetScope("TObject"); - - if (Cppyy::IsSubtype(klass, tobjectTypeID)) { - if (auto it = fObjectMap.find(cppobj); it != fObjectMap.end()) - fObjectMap.erase(it); - } - - return {true, true}; -} - -//////////////////////////////////////////////////////////////////////////// -/// \brief Get the class id of the TObject being deleted and run Cppyy's -/// RecursiveRemove. -/// \param[in] object Object being destructed. -void PyROOT::TMemoryRegulator::CallCppyyRecursiveRemove(TObject *object) -{ - auto cppobj = reinterpret_cast<Cppyy::TCppObject_t>(object); - - if (auto it = fObjectMap.find(cppobj); it != fObjectMap.end()) { - CPyCppyy::MemoryRegulator::RecursiveRemove(cppobj, it->second); - fObjectMap.erase(it); - } -} - -//////////////////////////////////////////////////////////////////////////// -/// \brief Clean up all tracked objects. -void PyROOT::TMemoryRegulator::ClearProxiedObjects() -{ - while (!fObjectMap.empty()) { - auto elem = fObjectMap.begin(); - auto cppobj = elem->first; - auto klassid = elem->second; - auto pyclass = CPyCppyy::CreateScopeProxy(klassid); - auto pyobj = (CPyCppyy::CPPInstance *)CPyCppyy::MemoryRegulator::RetrievePyObject(cppobj, pyclass); - - if (pyobj && (pyobj->fFlags & CPyCppyy::CPPInstance::kIsOwner)) { - // Only delete the C++ object if the Python proxy owns it. - // If it is a value, cppyy deletes it in RecursiveRemove as part of - // the proxy cleanup. - auto o = static_cast<TObject *>(cppobj); - bool isValue = pyobj->fFlags & CPyCppyy::CPPInstance::kIsValue; - CallCppyyRecursiveRemove(o); - if (!isValue) - delete o; - } else { - // Non-owning proxy, just unregister to clean tables. - // The proxy deletion by Python will have no effect on C++, so all good - bool ret = CPyCppyy::MemoryRegulator::UnregisterPyObject(pyobj, pyclass); - if (!ret) { - fObjectMap.erase(elem); - } - } - } -} diff --git a/bindings/pyroot/pythonizations/src/TMemoryRegulator.h b/bindings/pyroot/pythonizations/src/TMemoryRegulator.h deleted file mode 100644 index c2d50f7a41149..0000000000000 --- a/bindings/pyroot/pythonizations/src/TMemoryRegulator.h +++ /dev/null @@ -1,96 +0,0 @@ - -// Author: Enric Tejedor CERN 08/2019 -// Author: Vincenzo Eduardo Padulano CERN 05/2024 - -/************************************************************************* - * Copyright (C) 1995-2019, Rene Brun and Fons Rademakers. * - * All rights reserved. * - * * - * For the licensing terms see $ROOTSYS/LICENSE. * - * For the list of contributors see $ROOTSYS/README/CREDITS.
* - *************************************************************************/ - -#ifndef PYROOT_TMEMORYREGULATOR_H -#define PYROOT_TMEMORYREGULATOR_H - -////////////////////////////////////////////////////////////////////////// -//                                                                      // -// TMemoryRegulator                                                     // -//                                                                      // -// Sets hooks in Cppyy's MemoryRegulator to keep track of the TObjects  // -// that are constructed and destructed. For those objects, a map is     // -// filled, where the key is the address of the object and the value is  // -// the class to which the object belongs.                               // -//                                                                      // -// The TMemoryRegulator object, created in PyROOTWrapper.cxx, is added  // -// to the list of cleanups and its RecursiveRemove method is called by  // -// ROOT to manage the memory of TObjects being deleted.                 // -// In RecursiveRemove, the object being deleted is already a TNamed, so // -// the information about its actual class is not available anymore.     // -// To solve the problem, the map above is used to know the class of the // -// object, so that Cppyy's RecursiveRemove can be called passing the    // -// class as argument.                                                   // -////////////////////////////////////////////////////////////////////////// - -// Bindings -// CPyCppyy.h must be go first, since it includes Python.h, which must be -// included before any standard header -#include "../../cppyy/CPyCppyy/src/CPyCppyy.h" -#include "../../cppyy/CPyCppyy/src/MemoryRegulator.h" - -// ROOT -#include "TObject.h" - -// Stl -#include <unordered_map> - -namespace PyROOT { - -class RegulatorCleanup; - -/// Manages TObject-derived objects created in a PyROOT application -/// -/// This class is responsible to keep track of the creation of the objects -/// that need further memory management within ROOT. The `ClearProxiedObjects` -/// function is only called at PyROOT shutdown time. The `CallCppyyRecursiveRemove` -/// is called as part of the global list of cleanups object destruction. -/// -/// This class is intentionally not derived from TObject. See the -/// `PyROOT::RegulatorCleanup` class for more info. -/// -/// \note This class is not thread-safe on its own. We create one thread-local -/// object in PyROOTWrapper.cxx. -class TMemoryRegulator final { - using ObjectMap_t = std::unordered_map<Cppyy::TCppObject_t, Cppyy::TCppType_t>; - - ObjectMap_t fObjectMap{}; // key: object address; value: object class id - - std::pair<bool, bool> RegisterHook(Cppyy::TCppObject_t, Cppyy::TCppType_t); - - std::pair<bool, bool> UnregisterHook(Cppyy::TCppObject_t, Cppyy::TCppType_t); - - void CallCppyyRecursiveRemove(TObject *object); - - void ClearProxiedObjects(); - - TMemoryRegulator(); - - friend class RegulatorCleanup; -}; - -/// A TObject-derived class to inject the memory regulation logic in the ROOT list of cleanups. -/// -/// The purpose of this class is to keep the responsibilities separate between -/// the TMemoryRegulator logic and the rest of ROOT.
-class RegulatorCleanup final : public TObject { - TMemoryRegulator fRegulator{}; - -public: - void RecursiveRemove(TObject *object) final { fRegulator.CallCppyyRecursiveRemove(object); } - void CallClearProxiedObjects() { fRegulator.ClearProxiedObjects(); } - ClassDefInlineNV(RegulatorCleanup, 0); -}; - -} // namespace PyROOT - -#endif // !PYROOT_TMEMORYREGULATOR_H diff --git a/bindings/pyroot/pythonizations/test/CMakeLists.txt b/bindings/pyroot/pythonizations/test/CMakeLists.txt index 40bea60984b2d..5e4b82f70a949 100644 --- a/bindings/pyroot/pythonizations/test/CMakeLists.txt +++ b/bindings/pyroot/pythonizations/test/CMakeLists.txt @@ -138,24 +138,21 @@ endif() ROOT_ADD_PYUNITTEST(pyroot_pyz_tf_pycallables tf_pycallables.py) if(roofit) - # RooAbsCollection and subclasses pythonizations - if(NOT MSVC OR CMAKE_SIZEOF_VOID_P EQUAL 4 OR win_broken_tests) - ROOT_ADD_PYUNITTEST(pyroot_roofit_rooabscollection roofit/rooabscollection.py) - endif() - ROOT_ADD_PYUNITTEST(pyroot_roofit_rooarglist roofit/rooarglist.py) - - # RooDataHist pythonisations - ROOT_ADD_PYUNITTEST(pyroot_roofit_roodatahist_ploton roofit/roodatahist_ploton.py) - # RooDataSet pythonisations - ROOT_ADD_PYUNITTEST(pyroot_roofit_roodataset roofit/roodataset.py) - - # RooWorkspace pythonizations ROOT_ADD_PYUNITTEST(pyroot_roofit_rooabspdf_fitto roofit/rooabspdf_fitto.py) ROOT_ADD_PYUNITTEST(pyroot_roofit_rooabsreal_ploton roofit/rooabsreal_ploton.py) - + ROOT_ADD_PYUNITTEST(pyroot_roofit_rooarglist roofit/rooarglist.py) + ROOT_ADD_PYUNITTEST(pyroot_roofit_roocmdarg roofit/roocmdarg.py) + ROOT_ADD_PYUNITTEST(pyroot_roofit_roodatahist_numpy roofit/roodatahist_numpy.py PYTHON_DEPS numpy) + ROOT_ADD_PYUNITTEST(pyroot_roofit_roodatahist_ploton roofit/roodatahist_ploton.py) + ROOT_ADD_PYUNITTEST(pyroot_roofit_roodataset roofit/roodataset.py) + ROOT_ADD_PYUNITTEST(pyroot_roofit_roodataset_numpy roofit/roodataset_numpy.py PYTHON_DEPS numpy) ROOT_ADD_PYUNITTEST(pyroot_roofit_roolinkedlist roofit/roolinkedlist.py) + if(NOT MSVC OR CMAKE_SIZEOF_VOID_P EQUAL 4 OR win_broken_tests) + ROOT_ADD_PYUNITTEST(pyroot_roofit_rooabscollection roofit/rooabscollection.py) + endif() + if(NOT MSVC OR win_broken_tests) # Test pythonizations for the RooFitHS3 package, which is not built on Windows. ROOT_ADD_PYUNITTEST(pyroot_roofit_roojsonfactorywstool roofit/roojsonfactorywstool.py) @@ -168,10 +165,6 @@ if(roofit) ROOT_ADD_PYUNITTEST(pyroot_roofit_rooworkspace roofit/rooworkspace.py) endif() - # NumPy compatibility - ROOT_ADD_PYUNITTEST(pyroot_roofit_roodataset_numpy roofit/roodataset_numpy.py PYTHON_DEPS numpy) - ROOT_ADD_PYUNITTEST(pyroot_roofit_roodatahist_numpy roofit/roodatahist_numpy.py PYTHON_DEPS numpy) - endif() if (dataframe) @@ -192,3 +185,10 @@ ROOT_ADD_PYUNITTEST(pyroot_tcomplex tcomplex_operators.py) # Tests with memory usage ROOT_ADD_PYUNITTEST(pyroot_memory memory.py) + +# rbatchgenerator tests +# TODO: We currently do not support TensorFlow for Python >= 3.12 (see requirements.txt) +# Update here once that is fixed. 
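For orientation: the rbatchgenerator test registered just below exercises the TMVA batch generator helpers touched at the top of this diff. A minimal usage sketch, not part of the patch; the file and tree names are placeholders, the call mirrors the keyword arguments used in the new test, and PyTorch must be installed for the returned batches to be tensors.

import ROOT

# Write a small tree to read back, mirroring the pattern used in the new test.
ROOT.RDataFrame(100)\
    .Define("b1", "(int) rdfentry_")\
    .Define("b2", "(double) b1 * b1")\
    .Snapshot("mytree", "batchgen_example.root")

df = ROOT.RDataFrame("mytree", "batchgen_example.root")

# With validation_split=0.0, CreatePyTorchGenerators now returns only the
# training generator (see the CreatePyTorchGenerators hunk earlier in this diff).
gen_train = ROOT.TMVA.Experimental.CreatePyTorchGenerators(
    df, batch_size=10, chunk_size=50, target="b2",
    validation_split=0.0, shuffle=False, drop_remainder=False)

for x, y in gen_train:
    pass  # x and y are torch tensors of shape (10, 1) for this toy tree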
+if (NOT MSVC AND Python3_VERSION VERSION_LESS 3.12) + ROOT_ADD_PYUNITTEST(batchgen rbatchgenerator_completeness.py PYTHON_DEPS numpy tensorflow torch) +endif() diff --git a/bindings/pyroot/pythonizations/test/import_load_libs.py b/bindings/pyroot/pythonizations/test/import_load_libs.py index c370a3cd42f2c..24050959a1ebb 100644 --- a/bindings/pyroot/pythonizations/test/import_load_libs.py +++ b/bindings/pyroot/pythonizations/test/import_load_libs.py @@ -40,6 +40,7 @@ class ImportLoadLibs(unittest.TestCase): 'libssl', 'libcrypt.*', # by libssl 'libtbb', + 'libtbb_debug', 'libtbbmalloc', 'liburing', # by libRIO if uring option is enabled # On centos7 libssl links against kerberos pulling in all dependencies below, removed with libssl1.1.0 @@ -69,6 +70,7 @@ class ImportLoadLibs(unittest.TestCase): 'libnss_.*', 'ld.*', 'libffi', + 'libgcc_s', # AddressSanitizer runtime and ROOT configuration 'libclang_rt.asan-.*', 'libROOTSanitizerConfig', diff --git a/bindings/pyroot/pythonizations/test/memory.py b/bindings/pyroot/pythonizations/test/memory.py index da5134616a803..d0b35588388d6 100644 --- a/bindings/pyroot/pythonizations/test/memory.py +++ b/bindings/pyroot/pythonizations/test/memory.py @@ -1,5 +1,6 @@ -import gc import ROOT +import gc +import os import unittest @@ -43,6 +44,160 @@ class foo { delta = after - before self.assertLess(delta, 16) + def test_tstyle_memory_management(self): + """Regression test for https://github.com/root-project/root/issues/16918""" + + h1 = ROOT.TH1F("h1", "", 100, 0, 10) + + style = ROOT.TStyle("NewSTYLE", "") + groot = ROOT.ROOT.GetROOT() + groot.SetStyle(style.GetName()) + groot.ForceStyle() + + def test_tf2_memory_regulation(self): + """Regression test for https://github.com/root-project/root/issues/16942""" + # The test is just that the memory regulation works correctly and the + # application does not segfault + f2 = ROOT.TF2("f2", "sin(x)*sin(y)/x/y") + + def test_tf3_memory_regulation(self): + """Make sure TF3 is properly managed by the memory regulation logic""" + # The test is just that the memory regulation works correctly and the + # application does not segfault + f3 = ROOT.TF3("f3","[0] * sin(x) + [1] * cos(y) + [2] * z",0,10,0,10,0,10) + + def test_tcolor_memory_regulation(self): + """Make sure TColor is properly managed by the memory regulation logic""" + # The test is just that the memory regulation works correctly and the + # application does not segfault + c = ROOT.TColor(42, 42, 42) + + def test_ttree_clone_in_file_context(self): + """Test that CloneTree() doesn't give the ownership to Python when + TFile is opened.""" + + filename = "test_ttree_clone_in_file_context" + + ttree = ROOT.TTree("tree", "tree") + + with ROOT.TFile(filename, "RECREATE") as infile: + ttree_clone = ttree.CloneTree() + + os.remove(filename) + + def _check_object_in_subdir(self, klass, args): + """ + Test that an object which automatically registers with a subdirectory + does not give ownership to Python + """ + filename = "test_object_in_subdir.root" + try: + with ROOT.TFile(filename, "recreate") as f: + f.mkdir("subdir") + f.cd("subdir") + + # Create object by calling the constructor + x = klass(*args) + x.Write() + + # Create object by using the "virtual constructor" TObject::Clone() + x_clone = x.Clone() + x_clone.Write() + finally: + os.remove(filename) + + def test_objects_ownership_with_subdir(self): + """ + Test interaction of various types of objects with automatic directory + registration with a subdirectory of a TFile. 
+ """ + + objs = { + "TH1D": ("h", "h", 10, 0, 10), + "TH1C": ("h", "h", 10, 0, 10), + "TH1S": ("h", "h", 10, 0, 10), + "TH1I": ("h", "h", 10, 0, 10), + "TH1L": ("h", "h", 10, 0, 10), + "TH1F": ("h", "h", 10, 0, 10), + "TH1D": ("h", "h", 10, 0, 10), + "TH1K": ("h", "h", 10, 0, 10), + "TProfile": ("h", "h", 10, 0, 10), + "TH2C": ("h", "h", 10, 0, 10, 10, 0, 10), + "TH2S": ("h", "h", 10, 0, 10, 10, 0, 10), + "TH2I": ("h", "h", 10, 0, 10, 10, 0, 10), + "TH2L": ("h", "h", 10, 0, 10, 10, 0, 10), + "TH2F": ("h", "h", 10, 0, 10, 10, 0, 10), + "TH2D": ("h", "h", 10, 0, 10, 10, 0, 10), + "TH2Poly": ("h", "h", 10, 0, 10, 10, 0, 10), + "TH2PolyBin": tuple(), + "TProfile2D": ("h", "h", 10, 0, 10, 10, 0, 10), + "TProfile2PolyBin": tuple(), + "TProfile2Poly": ("h", "h", 10, 0, 10, 10, 0, 10), + "TH3C": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10), + "TH3S": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10), + "TH3I": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10), + "TH3L": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10), + "TH3F": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10), + "TH3D": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10), + "TProfile3D": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10), + "TGraph2D": (100,), + "TEntryList": ("name", "title"), + "TEventList": ("name", "title"), + "TTree": ("name", "title"), + "TNtuple": ("name", "title", "x:y:z"), + } + for klass, args in objs.items(): + with self.subTest(klass=klass): + self._check_object_in_subdir(getattr(ROOT, klass), args) + + def _check_object_setdirectory(self, klass, classname, args): + """ + Test that registering manually an object with a directory also triggers + a release of ownership from Python to C++. + """ + f1 = ROOT.TMemFile( + "_check_object_setdirectory_in_memory_file_begin", "recreate") + + x = klass(*args) + # TEfficiency does not automatically register with the directory + if not classname == "TEfficiency": + self.assertIs(x.GetDirectory(), f1) + x.SetDirectory(ROOT.nullptr) + self.assertFalse(x.GetDirectory()) + # Make sure that at this point the ownership of the object is with Python + ROOT.SetOwnership(x, True) + + f1.Close() + + f2 = ROOT.TMemFile("_check_object_setdirectory_in_memory_file_end", "recreate") + + # The pythonization should trigger the release of ownership to C++ + x.SetDirectory(f2) + self.assertIs(x.GetDirectory(), f2) + + f2.Close() + + def test_objects_interaction_with_setdirectory(self): + """ + Test interaction of various types of objects with manual registration + to a directory. 
+ """ + + objs = { + "TH1D": ("h", "h", 10, 0, 10), + "TH2D": ("h", "h", 10, 0, 10, 10, 0, 10), + "TH3D": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10), + "TGraph2D": (100,), + "TEfficiency": (ROOT.TH1D("h1", "h1", 10, 0, 10), ROOT.TH1D("h2", "h2", 10, 0, 10)), + "TEntryList": ("name", "title"), + "TEventList": ("name", "title"), + "TTree": ("name", "title"), + "TNtuple": ("name", "title", "x:y:z"), + } + for classname, args in objs.items(): + with self.subTest(classname=classname): + self._check_object_setdirectory(getattr(ROOT, classname), classname, args) + if __name__ == '__main__': unittest.main() diff --git a/bindings/pyroot/pythonizations/test/numbadeclare.py b/bindings/pyroot/pythonizations/test/numbadeclare.py index 08dde20d26d82..0a380523f3ada 100644 --- a/bindings/pyroot/pythonizations/test/numbadeclare.py +++ b/bindings/pyroot/pythonizations/test/numbadeclare.py @@ -77,7 +77,7 @@ def fn1(x): self.assertTrue(hasattr(fn1, "__cpp_wrapper__")) self.assertTrue(type(fn1.__cpp_wrapper__) == str) - self.assertEqual(sys.getrefcount(fn1.__cpp_wrapper__), 3) + self.assertLessEqual(sys.getrefcount(fn1.__cpp_wrapper__), 3) self.assertTrue(hasattr(fn1, "__py_wrapper__")) self.assertTrue(type(fn1.__py_wrapper__) == str) diff --git a/bindings/pyroot/pythonizations/test/rbatchgenerator_completeness.py b/bindings/pyroot/pythonizations/test/rbatchgenerator_completeness.py new file mode 100644 index 0000000000000..0d35e0ac01406 --- /dev/null +++ b/bindings/pyroot/pythonizations/test/rbatchgenerator_completeness.py @@ -0,0 +1,1000 @@ +import unittest +import os +import ROOT +import numpy as np +from random import randrange + + +class RBatchGeneratorMultipleFiles(unittest.TestCase): + + file_name1 = "first_half.root" + file_name2 = "second_half.root" + tree_name = "mytree" + + # default constants + n_train_batch = 2 + n_val_batch = 1 + val_remainder = 1 + + # Helpers + def define_rdf(self, num_of_entries=10): + df = ROOT.RDataFrame(num_of_entries)\ + .Define("b1", "(int) rdfentry_")\ + .Define("b2", "(double) b1*b1") + + return df + + def create_file(self, num_of_entries=10): + self.define_rdf(num_of_entries).Snapshot( + self.tree_name, self.file_name1) + + def create_5_entries_file(self): + df1 = ROOT.RDataFrame(5)\ + .Define("b1", "(int) rdfentry_ + 10")\ + .Define("b2", "(double) b1 * b1")\ + .Snapshot(self.tree_name, self.file_name2) + + def teardown_file(self, file): + os.remove(file) + + def test01_each_element_is_generated_unshuffled(self): + self.create_file() + + try: + df = ROOT.RDataFrame(self.tree_name, self.file_name1) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + df, + batch_size=3, + chunk_size=5, + target="b2", + validation_split=0.4, + shuffle=False, + drop_remainder=False + ) + + results_x_train = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0] + results_x_val = [3.0, 4.0, 8.0, 9.0] + results_y_train = [0.0, 1.0, 4.0, 25.0, 36.0, 49.0] + results_y_val = [9.0, 16.0, 64.0, 81.0] + + collected_x_train = [] + collected_x_val = [] + collected_y_train = [] + collected_y_val = [] + + train_iter = iter(gen_train) + val_iter = iter(gen_validation) + + for _ in range(self.n_train_batch): + x, y = next(train_iter) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x_train.append(x.tolist()) + collected_y_train.append(y.tolist()) + + for _ in range(self.n_val_batch): + x, y = next(val_iter) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + + x, y 
= next(val_iter) + self.assertTrue(x.shape == (self.val_remainder, 1)) + self.assertTrue(y.shape == (self.val_remainder, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + + flat_x_train = [ + x for xl in collected_x_train for xs in xl for x in xs] + flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs] + flat_y_train = [ + y for yl in collected_y_train for ys in yl for y in ys] + flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys] + + self.assertEqual(results_x_train, flat_x_train) + self.assertEqual(results_x_val, flat_x_val) + self.assertEqual(results_y_train, flat_y_train) + self.assertEqual(results_y_val, flat_y_val) + + self.teardown_file(self.file_name1) + + except: + self.teardown_file(self.file_name1) + raise + + def test02_each_element_is_generated_shuffled(self): + self.create_file() + + try: + df = ROOT.RDataFrame(self.tree_name, self.file_name1) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + df, + batch_size=3, + chunk_size=5, + target="b2", + validation_split=0.4, + shuffle=True, + drop_remainder=False + ) + + collected_x_train = [] + collected_x_val = [] + collected_y_train = [] + collected_y_val = [] + + train_iter = iter(gen_train) + val_iter = iter(gen_validation) + + for _ in range(self.n_train_batch): + x, y = next(train_iter) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x_train.append(x.tolist()) + collected_y_train.append(y.tolist()) + + for _ in range(self.n_val_batch): + x, y = next(val_iter) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + + x, y = next(val_iter) + self.assertTrue(x.shape == (self.val_remainder, 1)) + self.assertTrue(y.shape == (self.val_remainder, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + + flat_x_train = { + x for xl in collected_x_train for xs in xl for x in xs} + flat_x_val = {x for xl in collected_x_val for xs in xl for x in xs} + flat_y_train = { + y for yl in collected_y_train for ys in yl for y in ys} + flat_y_val = {y for yl in collected_y_val for ys in yl for y in ys} + + self.assertEqual(len(flat_x_train), 6) + self.assertEqual(len(flat_x_val), 4) + self.assertEqual(len(flat_y_train), 6) + self.assertEqual(len(flat_y_val), 4) + + self.teardown_file(self.file_name1) + + except: + self.teardown_file(self.file_name1) + raise + + def test03_chunk_input_smaller_than_batch_size(self): + """Checking for the situation when the batch can only be created after + more than two chunks. 
If not, segmentation fault will arise""" + self.create_file() + + try: + df = ROOT.RDataFrame(self.tree_name, self.file_name1) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + df, + batch_size=3, + chunk_size=3, + target="b2", + validation_split=0.4, + shuffle=False, + drop_remainder=False + ) + + next(iter(gen_train)) + + self.teardown_file(self.file_name1) + + except: + self.teardown_file(self.file_name1) + raise + + def test04_dropping_remainder(self): + self.create_file() + + try: + df = ROOT.RDataFrame(self.tree_name, self.file_name1) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + df, + batch_size=3, + chunk_size=5, + target="b2", + validation_split=0.4, + shuffle=False, + drop_remainder=True + ) + + collected_x = [] + collected_y = [] + + for x, y in gen_train: + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x.append(x) + collected_y.append(y) + + for x, y in gen_validation: + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x.append(x) + collected_y.append(y) + + self.assertEqual(len(collected_x), 3) + self.assertEqual(len(collected_y), 3) + + self.teardown_file(self.file_name1) + + except: + self.teardown_file(self.file_name1) + raise + + def test05_more_than_one_file(self): + self.create_file() + self.create_5_entries_file() + + try: + df = ROOT.RDataFrame( + self.tree_name, [self.file_name1, self.file_name2]) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + df, + batch_size=3, + chunk_size=5, + target="b2", + validation_split=0.4, + shuffle=False, + drop_remainder=False + ) + + results_x_train = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0, 10.0, 11.0, 12.0] + results_x_val = [3.0, 4.0, 8.0, 9.0, 13.0, 14.0] + results_y_train = [0.0, 1.0, 4.0, 25.0, + 36.0, 49.0, 100.0, 121.0, 144.0] + results_y_val = [9.0, 16.0, 64.0, 81.0, 169.0, 196.0] + + collected_x_train = [] + collected_x_val = [] + collected_y_train = [] + collected_y_val = [] + + for x, y in gen_train: + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x_train.append(x.tolist()) + collected_y_train.append(y.tolist()) + + for x, y in gen_validation: + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + + flat_x_train = [ + x for xl in collected_x_train for xs in xl for x in xs] + flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs] + flat_y_train = [ + y for yl in collected_y_train for ys in yl for y in ys] + flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys] + + self.assertEqual(results_x_train, flat_x_train) + self.assertEqual(results_x_val, flat_x_val) + self.assertEqual(results_y_train, flat_y_train) + self.assertEqual(results_y_val, flat_y_val) + + self.teardown_file(self.file_name1) + self.teardown_file(self.file_name2) + + except: + self.teardown_file(self.file_name1) + self.teardown_file(self.file_name2) + raise + + def test06_multiple_target_columns(self): + file_name = "multiple_target_columns.root" + + ROOT.RDataFrame(10)\ + .Define("b1", "(Short_t) rdfentry_")\ + .Define("b2", "(UShort_t) b1 * b1")\ + .Define("b3", "(double) rdfentry_ * 10")\ + .Define("b4", "(double) b3 * 10")\ + .Snapshot("myTree", file_name) + try: + df = ROOT.RDataFrame("myTree", file_name) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + df, + batch_size=3, + chunk_size=5, + target=["b2", "b4"], + 
weights="b3", + validation_split=0.4, + shuffle=False, + drop_remainder=False + ) + + results_x_train = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0] + results_x_val = [3.0, 4.0, 8.0, 9.0] + results_y_train = [0.0, 0.0, 1.0, 100.0, 4.0, + 200.0, 25.0, 500.0, 36.0, 600.0, 49.0, 700.0] + results_y_val = [9.0, 300.0, 16.0, 400.0, 64.0, 800.0, 81.0, 900.0] + results_z_train = [0.0, 10.0, 20.0, 50.0, 60.0, 70.0] + results_z_val = [30.0, 40.0, 80.0, 90.0] + + collected_x_train = [] + collected_x_val = [] + collected_y_train = [] + collected_y_val = [] + collected_z_train = [] + collected_z_val = [] + + iter_train = iter(gen_train) + iter_val = iter(gen_validation) + + for _ in range(self.n_train_batch): + x, y, z = next(iter_train) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 2)) + self.assertTrue(z.shape == (3, 1)) + collected_x_train.append(x.tolist()) + collected_y_train.append(y.tolist()) + collected_z_train.append(z.tolist()) + + for _ in range(self.n_val_batch): + x, y, z = next(iter_val) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 2)) + self.assertTrue(z.shape == (3, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + collected_z_val.append(z.tolist()) + + x, y, z = next(iter_val) + self.assertTrue(x.shape == (self.val_remainder, 1)) + self.assertTrue(y.shape == (self.val_remainder, 2)) + self.assertTrue(z.shape == (self.val_remainder, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + collected_z_val.append(z.tolist()) + + flat_x_train = [ + x for xl in collected_x_train for xs in xl for x in xs] + flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs] + flat_y_train = [ + y for yl in collected_y_train for ys in yl for y in ys] + flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys] + flat_z_train = [ + z for zl in collected_z_train for zs in zl for z in zs] + flat_z_val = [z for zl in collected_z_val for zs in zl for z in zs] + + self.assertEqual(results_x_train, flat_x_train) + self.assertEqual(results_x_val, flat_x_val) + self.assertEqual(results_y_train, flat_y_train) + self.assertEqual(results_y_val, flat_y_val) + self.assertEqual(results_z_train, flat_z_train) + self.assertEqual(results_z_val, flat_z_val) + + self.teardown_file(file_name) + + except: + self.teardown_file(file_name) + raise + + def test07_multiple_input_columns(self): + file_name = "multiple_input_columns.root" + + ROOT.RDataFrame(10)\ + .Define("b1", "(Short_t) rdfentry_")\ + .Define("b2", "(UShort_t) b1 * b1")\ + .Define("b3", "(double) rdfentry_ * 10")\ + .Snapshot("myTree", file_name) + + try: + df = ROOT.RDataFrame("myTree", file_name) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + df, + batch_size=3, + chunk_size=5, + target="b2", + validation_split=0.4, + shuffle=False, + drop_remainder=False + ) + + results_x_train = [0.0, 0.0, 1.0, 10.0, 2.0, + 20.0, 5.0, 50.0, 6.0, 60.0, 7.0, 70.0] + results_x_val = [3.0, 30.0, 4.0, 40.0, 8.0, 80.0, 9.0, 90.0] + results_y_train = [0.0, 1.0, 4.0, 25.0, 36.0, 49.] 
+ results_y_val = [9.0, 16.0, 64.0, 81.0] + + collected_x_train = [] + collected_x_val = [] + collected_y_train = [] + collected_y_val = [] + + iter_train = iter(gen_train) + iter_val = iter(gen_validation) + + for _ in range(self.n_train_batch): + x, y = next(iter_train) + self.assertTrue(x.shape == (3, 2)) + self.assertTrue(y.shape == (3, 1)) + collected_x_train.append(x.tolist()) + collected_y_train.append(y.tolist()) + + for _ in range(self.n_val_batch): + x, y = next(iter_val) + self.assertTrue(x.shape == (3, 2)) + self.assertTrue(y.shape == (3, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + + x, y = next(iter_val) + self.assertTrue(x.shape == (self.val_remainder, 2)) + self.assertTrue(y.shape == (self.val_remainder, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + + flat_x_train = [ + x for xl in collected_x_train for xs in xl for x in xs] + flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs] + flat_y_train = [ + y for yl in collected_y_train for ys in yl for y in ys] + flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys] + + self.assertEqual(results_x_train, flat_x_train) + self.assertEqual(results_x_val, flat_x_val) + self.assertEqual(results_y_train, flat_y_train) + self.assertEqual(results_y_val, flat_y_val) + + self.teardown_file(file_name) + + except: + self.teardown_file(file_name) + raise + + def test08_filtered(self): + self.create_file() + + try: + df = ROOT.RDataFrame(self.tree_name, self.file_name1) + + dff = df.Filter("b1 % 2 == 0", "name") + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + dff, + batch_size=3, + chunk_size=5, + target="b2", + validation_split=0.4, + shuffle=False, + drop_remainder=False + ) + + results_x_train = [0.0, 2.0, 4.0] + results_x_val = [6.0, 8.0] + results_y_train = [0.0, 4.0, 16.0] + results_y_val = [36.0, 64.0] + + collected_x_train = [] + collected_x_val = [] + collected_y_train = [] + collected_y_val = [] + + train_iter = iter(gen_train) + val_iter = iter(gen_validation) + + x, y = next(train_iter) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x_train.append(x.tolist()) + collected_y_train.append(y.tolist()) + + x, y = next(val_iter) + self.assertTrue(x.shape == (2, 1)) + self.assertTrue(y.shape == (2, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + + flat_x_train = [ + x for xl in collected_x_train for xs in xl for x in xs] + flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs] + flat_y_train = [ + y for yl in collected_y_train for ys in yl for y in ys] + flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys] + + self.assertEqual(results_x_train, flat_x_train) + self.assertEqual(results_x_val, flat_x_val) + self.assertEqual(results_y_train, flat_y_train) + self.assertEqual(results_y_val, flat_y_val) + + self.teardown_file(self.file_name1) + + except: + self.teardown_file(self.file_name1) + raise + + def test09_filtered_last_chunk(self): + file_name = "filtered_last_chunk.root" + tree_name = "myTree" + + ROOT.RDataFrame(20)\ + .Define("b1", "(Short_t) rdfentry_")\ + .Define("b2", "(UShort_t) b1 * b1")\ + .Snapshot(tree_name, file_name) + + try: + df = ROOT.RDataFrame(tree_name, file_name) + + dff = df.Filter("b1 % 2 == 0", "name") + + gen_train, _ = ROOT.TMVA.Experimental.CreateNumPyGenerators( + dff, + batch_size=3, + chunk_size=9, + target="b2", + validation_split=0, + shuffle=False, + drop_remainder=False + ) + + 
results_x_train = [0.0, 2.0, 4.0, 6.0, + 8.0, 10.0, 12.0, 14.0, 16.0, 18.0] + results_y_train = [0.0, 4.0, 16.0, 36.0, + 64.0, 100.0, 144.0, 196.0, 256.0, 324.0] + + collected_x_train = [] + collected_y_train = [] + + train_iter = iter(gen_train) + + for _ in range(3): + x, y = next(train_iter) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x_train.append(x.tolist()) + collected_y_train.append(y.tolist()) + + x, y = next(train_iter) + self.assertTrue(x.shape == (1, 1)) + self.assertTrue(y.shape == (1, 1)) + collected_x_train.append(x.tolist()) + collected_y_train.append(y.tolist()) + + flat_x_train = [ + x for xl in collected_x_train for xs in xl for x in xs] + flat_y_train = [ + y for yl in collected_y_train for ys in yl for y in ys] + + self.assertEqual(results_x_train, flat_x_train) + self.assertEqual(results_y_train, flat_y_train) + + self.teardown_file(file_name) + + except: + self.teardown_file(file_name) + raise + + def test10_two_epochs_shuffled(self): + self.create_file() + + try: + df = ROOT.RDataFrame(self.tree_name, self.file_name1) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + df, + batch_size=3, + chunk_size=5, + target="b2", + validation_split=0.4, + shuffle=True, + drop_remainder=False + ) + + both_epochs_collected_x_val = [] + both_epochs_collected_y_val = [] + + for _ in range(2): + collected_x_train = [] + collected_x_val = [] + collected_y_train = [] + collected_y_val = [] + + iter_train = iter(gen_train) + iter_val = iter(gen_validation) + + for _ in range(self.n_train_batch): + x, y = next(iter_train) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x_train.append(x.tolist()) + collected_y_train.append(y.tolist()) + + for _ in range(self.n_val_batch): + x, y = next(iter_val) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + + x, y = next(iter_val) + self.assertTrue(x.shape == (self.val_remainder, 1)) + self.assertTrue(y.shape == (self.val_remainder, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + + flat_x_train = { + x for xl in collected_x_train for xs in xl for x in xs} + flat_x_val = { + x for xl in collected_x_val for xs in xl for x in xs} + flat_y_train = { + y for yl in collected_y_train for ys in yl for y in ys} + flat_y_val = { + y for yl in collected_y_val for ys in yl for y in ys} + + self.assertEqual(len(flat_x_train), 6) + self.assertEqual(len(flat_x_val), 4) + self.assertEqual(len(flat_y_train), 6) + self.assertEqual(len(flat_y_val), 4) + + both_epochs_collected_x_val.append(collected_x_val) + both_epochs_collected_y_val.append(collected_y_val) + + self.assertEqual( + both_epochs_collected_x_val[0], both_epochs_collected_x_val[1]) + self.assertEqual( + both_epochs_collected_y_val[0], both_epochs_collected_y_val[1]) + finally: + self.teardown_file(self.file_name1) + + def test11_number_of_training_and_validation_batches_remainder(self): + self.create_file() + + try: + df = ROOT.RDataFrame(self.tree_name, self.file_name1) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + df, + batch_size=3, + chunk_size=5, + target="b2", + validation_split=0.4, + shuffle=False, + drop_remainder=False + ) + + number_of_training_batches = 0 + number_of_validation_batches = 0 + + for _ in gen_train: + number_of_training_batches += 1 + + for _ in gen_validation: + number_of_validation_batches += 1 + + 
self.assertEqual(gen_train.number_of_batches, + number_of_training_batches) + self.assertEqual(gen_validation.number_of_batches, + number_of_validation_batches) + self.assertEqual(gen_train.last_batch_no_of_rows, 0) + self.assertEqual(gen_validation.last_batch_no_of_rows, 1) + + self.teardown_file(self.file_name1) + + except: + self.teardown_file(self.file_name1) + raise + + def test12_PyTorch(self): + import torch + + file_name = "multiple_target_columns.root" + + ROOT.RDataFrame(10)\ + .Define("b1", "(Short_t) rdfentry_")\ + .Define("b2", "(UShort_t) b1 * b1")\ + .Define("b3", "(double) rdfentry_ * 10")\ + .Define("b4", "(double) b3 * 10")\ + .Snapshot("myTree", file_name) + + try: + df = ROOT.RDataFrame("myTree", file_name) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreatePyTorchGenerators( + df, + batch_size=3, + chunk_size=5, + target=["b2", "b4"], + weights="b3", + validation_split=0.4, + shuffle=False, + drop_remainder=False + ) + + results_x_train = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0] + results_x_val = [3.0, 4.0, 8.0, 9.0] + results_y_train = [0.0, 0.0, 1.0, 100.0, 4.0, + 200.0, 25.0, 500.0, 36.0, 600.0, 49.0, 700.0] + results_y_val = [9.0, 300.0, 16.0, 400.0, 64.0, 800.0, 81.0, 900.0] + results_z_train = [0.0, 10.0, 20.0, 50.0, 60.0, 70.0] + results_z_val = [30.0, 40.0, 80.0, 90.0] + + collected_x_train = [] + collected_x_val = [] + collected_y_train = [] + collected_y_val = [] + collected_z_train = [] + collected_z_val = [] + + iter_train = iter(gen_train) + iter_val = iter(gen_validation) + + for _ in range(self.n_train_batch): + x, y, z = next(iter_train) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 2)) + self.assertTrue(z.shape == (3, 1)) + collected_x_train.append(x.tolist()) + collected_y_train.append(y.tolist()) + collected_z_train.append(z.tolist()) + + for _ in range(self.n_val_batch): + x, y, z = next(iter_val) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 2)) + self.assertTrue(z.shape == (3, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + collected_z_val.append(z.tolist()) + + x, y, z = next(iter_val) + self.assertTrue(x.shape == (self.val_remainder, 1)) + self.assertTrue(y.shape == (self.val_remainder, 2)) + self.assertTrue(z.shape == (self.val_remainder, 1)) + collected_x_val.append(x.tolist()) + collected_y_val.append(y.tolist()) + collected_z_val.append(z.tolist()) + + flat_x_train = [ + x for xl in collected_x_train for xs in xl for x in xs] + flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs] + flat_y_train = [ + y for yl in collected_y_train for ys in yl for y in ys] + flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys] + flat_z_train = [ + z for zl in collected_z_train for zs in zl for z in zs] + flat_z_val = [z for zl in collected_z_val for zs in zl for z in zs] + + self.assertEqual(results_x_train, flat_x_train) + self.assertEqual(results_x_val, flat_x_val) + self.assertEqual(results_y_train, flat_y_train) + self.assertEqual(results_y_val, flat_y_val) + self.assertEqual(results_z_train, flat_z_train) + self.assertEqual(results_z_val, flat_z_val) + + self.teardown_file(file_name) + + except: + self.teardown_file(file_name) + raise + + def test13_TensorFlow(self): + import tensorflow as tf + + file_name = "multiple_target_columns.root" + + ROOT.RDataFrame(10)\ + .Define("b1", "(Short_t) rdfentry_")\ + .Define("b2", "(UShort_t) b1 * b1")\ + .Define("b3", "(double) rdfentry_ * 10")\ + .Define("b4", "(double) b3 * 10")\ + .Snapshot("myTree", 
file_name) + + try: + df = ROOT.RDataFrame("myTree", file_name) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateTFDatasets( + df, + batch_size=3, + chunk_size=5, + target=["b2", "b4"], + weights="b3", + validation_split=0.4, + shuffle=False, + drop_remainder=False + ) + + results_x_train = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0] + results_x_val = [3.0, 4.0, 8.0, 9.0, 0.0, 0.0] + results_y_train = [0.0, 0.0, 1.0, 100.0, 4.0, + 200.0, 25.0, 500.0, 36.0, 600.0, 49.0, 700.0] + results_y_val = [9.0, 300.0, 16.0, 400.0, 64.0, + 800.0, 81.0, 900.0, 0.0, 0.0, 0.0, 0.0] + results_z_train = [0.0, 10.0, 20.0, 50.0, 60.0, 70.0] + results_z_val = [30.0, 40.0, 80.0, 90.0, 0.0, 0.0] + + collected_x_train = [] + collected_x_val = [] + collected_y_train = [] + collected_y_val = [] + collected_z_train = [] + collected_z_val = [] + + iter_train = iter(gen_train) + iter_val = iter(gen_validation) + + for _ in range(self.n_train_batch): + x, y, z = next(iter_train) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 2)) + self.assertTrue(z.shape == (3, 1)) + collected_x_train.append(x.numpy().tolist()) + collected_y_train.append(y.numpy().tolist()) + collected_z_train.append(z.numpy().tolist()) + + for _ in range(self.n_val_batch): + x, y, z = next(iter_val) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 2)) + self.assertTrue(z.shape == (3, 1)) + collected_x_val.append(x.numpy().tolist()) + collected_y_val.append(y.numpy().tolist()) + collected_z_val.append(z.numpy().tolist()) + + x, y, z = next(iter_val) + self.assertTrue(x.shape == (3, 1)) + self.assertTrue(y.shape == (3, 2)) + self.assertTrue(z.shape == (3, 1)) + collected_x_val.append(x.numpy().tolist()) + collected_y_val.append(y.numpy().tolist()) + collected_z_val.append(z.numpy().tolist()) + + flat_x_train = [ + x for xl in collected_x_train for xs in xl for x in xs] + flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs] + flat_y_train = [ + y for yl in collected_y_train for ys in yl for y in ys] + flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys] + flat_z_train = [ + z for zl in collected_z_train for zs in zl for z in zs] + flat_z_val = [z for zl in collected_z_val for zs in zl for z in zs] + + self.assertEqual(results_x_train, flat_x_train) + self.assertEqual(results_x_val, flat_x_val) + self.assertEqual(results_y_train, flat_y_train) + self.assertEqual(results_y_val, flat_y_val) + self.assertEqual(results_z_train, flat_z_train) + self.assertEqual(results_z_val, flat_z_val) + + self.teardown_file(file_name) + + except: + self.teardown_file(file_name) + raise + + def test14_big_data(self): + file_name = "big_data.root" + tree_name = "myTree" + + entries_in_rdf = randrange(10000, 30000) + chunk_size = randrange(1000, 3001) + batch_size = randrange(100, 501) + + error_message = f"\n Batch size: {batch_size} Chunk size: {chunk_size}\ + Number of entries: {entries_in_rdf}" + + def define_rdf(num_of_entries): + ROOT.RDataFrame(num_of_entries)\ + .Define("b1", "(int) rdfentry_")\ + .Define("b2", "(double) rdfentry_ * 2")\ + .Define("b3", "(int) rdfentry_ + 10192")\ + .Define("b4", "(int) -rdfentry_")\ + .Define("b5", "(double) -rdfentry_ - 10192")\ + .Snapshot(tree_name, file_name) + + def test(size_of_batch, size_of_chunk, num_of_entries): + define_rdf(num_of_entries) + + try: + df = ROOT.RDataFrame(tree_name, file_name) + + gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + df, + batch_size=size_of_batch, + chunk_size=size_of_chunk, + target=["b3", "b5"], 
+ weights="b2", + validation_split=0.3, + shuffle=False, + drop_remainder=False + ) + + collect_x = [] + + train_remainder = gen_train.last_batch_no_of_rows + val_remainder = gen_validation.last_batch_no_of_rows + + n_train_batches = gen_train.number_of_batches - \ + 1 if train_remainder else gen_train.number_of_batches + n_val_batches = gen_validation.number_of_batches - \ + 1 if val_remainder else gen_validation.number_of_batches + + iter_train = iter(gen_train) + iter_val = iter(gen_validation) + + for i in range(n_train_batches): + x, y, z = next(iter_train) + + self.assertTrue(x.shape == (size_of_batch, 2), + error_message + f" row: {i} x shape: {x.shape}") + self.assertTrue(y.shape == (size_of_batch, 2), + error_message + f" row: {i} y shape: {y.shape}") + self.assertTrue(z.shape == (size_of_batch, 1), + error_message + f" row: {i} z shape: {z.shape}") + + self.assertTrue( + np.all(x[:, 0]*(-1) == x[:, 1]), error_message + f" row: {i}") + self.assertTrue( + np.all(x[:, 0]+10192 == y[:, 0]), error_message + f" row: {i}") + # self.assertTrue(np.all(x[:,0]*(-1)-10192==y[:,1]), error_message) + self.assertTrue( + np.all(x[:, 0]*2 == z[:, 0]), error_message + f" row: {i}") + + collect_x.extend(list(x[:, 0])) + + if train_remainder: + x, y, z = next(iter_train) + self.assertTrue(x.shape == ( + train_remainder, 2), error_message) + self.assertTrue(y.shape == ( + train_remainder, 2), error_message) + self.assertTrue(z.shape == ( + train_remainder, 1), error_message) + collect_x.extend(list(x[:, 0])) + + for _ in range(n_val_batches): + x, y, z = next(iter_val) + + self.assertTrue(x.shape == (size_of_batch, 2), + error_message + f" row: {i} x shape: {x.shape}") + self.assertTrue(y.shape == (size_of_batch, 2), + error_message + f" row: {i} y shape: {y.shape}") + self.assertTrue(z.shape == (size_of_batch, 1), + error_message + f" row: {i} z shape: {z.shape}") + + self.assertTrue( + np.all(x[:, 0]*(-1) == x[:, 1]), error_message) + self.assertTrue( + np.all(x[:, 0]+10192 == y[:, 0]), error_message) + # self.assertTrue(np.all(x[:,0]*(-1)-10192==y[:,1]), error_message) + self.assertTrue( + np.all(x[:, 0]*2 == z[:, 0]), error_message) + + collect_x.extend(list(x[:, 0])) + + if val_remainder: + x, y, z = next(iter_val) + self.assertTrue(x.shape == ( + val_remainder, 2), error_message) + self.assertTrue(y.shape == ( + val_remainder, 2), error_message) + self.assertTrue(z.shape == ( + val_remainder, 1), error_message) + collect_x.extend(list(x[:, 0])) + + self.assertTrue(set(collect_x) == set(i for i in range(num_of_entries)), f"collected length: {len(set(collect_x))}\ + generated length {len(set(i for i in range(num_of_entries)))}") + + except: + self.teardown_file(file_name) + raise + + test(batch_size, chunk_size, entries_in_rdf) + + +if __name__ == '__main__': + unittest.main() diff --git a/bindings/pyroot/pythonizations/test/rdf_define_pyz.py b/bindings/pyroot/pythonizations/test/rdf_define_pyz.py index f67d469f1f7da..3478fdc05074c 100644 --- a/bindings/pyroot/pythonizations/test/rdf_define_pyz.py +++ b/bindings/pyroot/pythonizations/test/rdf_define_pyz.py @@ -7,70 +7,6 @@ class PyDefine(unittest.TestCase): Testing Pythonized Define of RDF """ - def test_with_dtypes(self): - """ - Tests the pythonized define with all the numba declare datatypes and - """ - numba_declare_dtypes = ['float', 'double', 'int', 'unsigned int', 'long', 'unsigned long', 'bool'] - rdf = ROOT.RDataFrame(10) - for type in numba_declare_dtypes: - col_name = "col_" + type.replace(" ","") - rdf = rdf.Define(col_name, 
f"({type}) rdfentry_") - rdf = rdf.Define(col_name + "_arr", lambda col: np.array([col,col]), [col_name]) - arr = np.arange(0, 10) - if type == 'bool': - arr = np.array(arr, dtype='bool') - flag1 = np.array_equal(rdf.AsNumpy()[col_name], arr) - flag2 = True - for idx, entry in enumerate(rdf.AsNumpy()[col_name + "_arr"]): - if not (entry[0] == arr[idx] and entry[1] == arr[idx]): - flag2 = False - self.assertTrue(flag1 and flag2) - - def test_define_overload1(self): - rdf = ROOT.RDataFrame(10).Define("x", "rdfentry_") - rdf = rdf.Define("x2", lambda y: y*y, ["x"]) - arr = np.arange(0, 10) - flag = np.array_equal(rdf.AsNumpy()["x2"], arr*arr) - self.assertTrue(flag) - - def test_define_overload2(self): - rdf = ROOT.RDataFrame(10).Define("x", "rdfentry_") - rdf = rdf.Define("x2", lambda x: x*x) - arr = np.arange(0, 10) - flag = np.array_equal(rdf.AsNumpy()["x2"], arr*arr) - self.assertTrue(flag) - - def test_define_extra_args(self): - rdf = ROOT.RDataFrame(10).Define("x", "rdfentry_") - def x_y(x, y): - return x*y - rdf = rdf.Define("x_y", x_y , extra_args = {"y": 0.5}) - arr = np.arange(0, 10) - flag = np.array_equal(rdf.AsNumpy()["x_y"], arr*0.5) - self.assertTrue(flag) - - def test_capture_from_scope(self): - rdf = ROOT.RDataFrame(10).Define("x", "rdfentry_") - y = 0.5 - def x_times_y(x): - return x*y - rdf = rdf.Define("x_y", x_times_y ) - arr = np.arange(0, 10) - flag = np.array_equal(rdf.AsNumpy()["x_y"], arr*0.5) - self.assertTrue(flag) - - def test_arrays(self): - rdf = ROOT.RDataFrame(5).Define("x", "rdfentry_") - rdf = rdf.Define("x_arr", lambda x: np.array([x, x])) - def norm(x_arr): - return np.sqrt(x_arr[0]**2 + x_arr[1]**2) - rdf = rdf.Define("mag", norm) - arr = np.arange(0, 5) - arr = np.sqrt(arr*arr + arr*arr ) - flag = np.array_equal(rdf.AsNumpy()["mag"], arr) - self.assertTrue(flag) - def test_cpp_functor(self): """ Test that a C++ functor can be passed as a callable argument of a diff --git a/bindings/pyroot/pythonizations/test/rdf_filter_pyz.py b/bindings/pyroot/pythonizations/test/rdf_filter_pyz.py index ea9daefa56562..9cdc9df28ebb8 100755 --- a/bindings/pyroot/pythonizations/test/rdf_filter_pyz.py +++ b/bindings/pyroot/pythonizations/test/rdf_filter_pyz.py @@ -3,90 +3,12 @@ import numpy as np import os -from rdf_filter_pyz_helper import CreateData, TYPE_TO_SYMBOL, filter_dict class PyFilter(unittest.TestCase): """ Testing Pythonized Filters of RDF """ - def test_with_dtypes(self): - """ - Tests the pythonized filter with all the tree datatypes and - """ - CreateData() - rdf = ROOT.RDataFrame("TestData", "./RDF_Filter_Pyz_TestData.root") - test_cols =[str(c) for c in rdf.GetColumnNames()] - for col_name in test_cols: - func = filter_dict[TYPE_TO_SYMBOL[col_name]] # filter function - x = rdf.Mean(col_name).GetValue() - if col_name == 'Bool_t': x = True - filtered = rdf.Filter(func, extra_args = {'x':x}) - res_root = filtered.AsNumpy()[col_name] - if not isinstance(x, bool): - filtered2 = rdf.Filter(f"{col_name} > {x}") - else: - if x: - filtered2 = rdf.Filter(f"{col_name} == true") - else: - filtered2 = rdf.Filter(f"{col_name} == false") - res_root2 = filtered2.AsNumpy()[col_name] - self.assertTrue(np.array_equal(res_root,res_root2)) - - os.remove("./RDF_Filter_Pyz_TestData.root") - - # CPP Overload 1: Filter(callable, col_list = [], name = "") => 3 Possibilities - def test_filter_overload1_a(self): - """ - Test to verify the first overload (1.a) of filter - Filter(callable, col_list, name) - """ - rdf = ROOT.RDataFrame(5).Define("x", "(double) rdfentry_") - def 
x_greater_than_2(x): - return x>2 - fil1 = rdf.Filter(x_greater_than_2, ["x"], "x is more than 2") - self.assertTrue(np.array_equal(fil1.AsNumpy()["x"], np.array([3, 4]))) - - def test_filter_overload1_b(self): - """ - Test to verify the first overload (1.b) of filter - Filter(callable, col_list) - """ - rdf = ROOT.RDataFrame(5).Define("x", "(double) rdfentry_") - fil1 = rdf.Filter(lambda x: x>2, ["x"]) - self.assertTrue(np.array_equal(fil1.AsNumpy()["x"], np.array([3, 4]))) - - def test_filter_overload1_c(self): - """ - Test to verify the first overload (1.c) of filter - Filter(callable) - """ - rdf = ROOT.RDataFrame(5).Define("x", "(double) rdfentry_") - def x_greater_than_2(x): - return x>2 - fil1 = rdf.Filter(x_greater_than_2) - self.assertTrue(np.array_equal(fil1.AsNumpy()["x"], np.array([3, 4]))) - - # CPP Overload 3: Filter(callable, name) - def test_filter_overload3(self): - """ - Test to verify the third overload of filter - Filter(callable, name) - """ - rdf = ROOT.RDataFrame(5).Define("x", "(double) rdfentry_") - def x_greater_than_2(x): - return x>2 - fil1 = rdf.Filter(x_greater_than_2, "x is greater than 2") - self.assertTrue(np.array_equal(fil1.AsNumpy()["x"], np.array([3, 4]))) - - def test_capture_from_scope(self): - rdf = ROOT.RDataFrame(5).Define("x", "(double) rdfentry_") - y = 2 - def x_greater_than_y(x): - return x > y - fil1 = rdf.Filter(x_greater_than_y, "x is greater than 2") - self.assertTrue(np.array_equal(fil1.AsNumpy()["x"], np.array([3, 4]))) - def test_cpp_functor(self): """ Test that a C++ functor can be passed as a callable argument of a diff --git a/bindings/pyroot/pythonizations/test/rdf_filter_pyz_helper.py b/bindings/pyroot/pythonizations/test/rdf_filter_pyz_helper.py deleted file mode 100755 index 367fbd963d131..0000000000000 --- a/bindings/pyroot/pythonizations/test/rdf_filter_pyz_helper.py +++ /dev/null @@ -1,121 +0,0 @@ -import ROOT -import numpy as np - -def CreateData(): - """ - This function generates the root files of various datatypes with random values to test them. - Datatypes could be generated are Strings, Char_t, UChar_t - """ - # function to create random numbers.. 
gRandom did not give me signed integers - @ROOT.Numba.Declare(['int', 'bool'], 'long') - def random_long(bits, signed): - if signed: - low = -1*2**(bits - 1) - high = 2**(bits - 1) -1 - else: - low = 0 - high = 2**bits - return np.random.randint(low, high) - - N = 100 # df with 100 entries - df = ROOT.RDataFrame(N) - - col_name = "Short_t" - df = df.Define(col_name, f"({col_name}) Numba::random_long(16, true)") - - col_name = "UShort_t" - df = df.Define(col_name, f"({col_name}) Numba::random_long(16, false)") - - col_name = "Int_t" - df = df.Define(col_name, f"({col_name}) Numba::random_long(32, true)") - - col_name = "UInt_t" - df = df.Define(col_name, f"({col_name}) Numba::random_long(32, false)") - - col_name = "Float_t" - df = df.Define(col_name, f"({col_name}) gRandom->Gaus()") - - col_name = "Float16_t" - df = df.Define(col_name, f"({col_name}) gRandom->Gaus()") - - col_name = "Double_t" - df = df.Define(col_name, f"({col_name}) gRandom->Gaus()") - - col_name = "Double32_t" - df = df.Define(col_name, f"({col_name}) gRandom->Gaus()") - - col_name = "Long64_t" - df = df.Define(col_name, f"({col_name}) rdfentry_") - - col_name = "ULong64_t" - df = df.Define(col_name, f"({col_name}) rdfentry_") - - col_name = "Long_t" - df = df.Define(col_name, f"({col_name}) rdfentry_") - - col_name = "ULong_t" - df = df.Define(col_name, f"({col_name}) rdfentry_") - - col_name = "Bool_t" - df = df.Define(col_name, f"({col_name}) gRandom->Integer(2)") - - df.Snapshot("TestData", "./RDF_Filter_Pyz_TestData.root") - -def filter_general(col, x): - return bool(col > x) - -def filter_C(String, x): - pass - -def filter_B(Char_t, x): - return bool(Char_t > x) - -def filter_b(UChar_t, x): - return bool(UChar_t > x) - -def filter_S(Short_t, x): - return bool(Short_t > x) - -def filter_s(UShort_t, x): - return bool(UShort_t > x) - -def filter_I(Int_t, x): - return bool(Int_t > x) - -def filter_i(UInt_t, x): - return bool(UInt_t > x) - -def filter_F(Float_t, x): - return bool(Float_t > x) - -def filter_f(Float16_t, x): - return bool(Float16_t > x) - -def filter_D(Double_t, x): - return bool(Double_t > x) - -def filter_d(Double32_t, x): - return bool(Double32_t > x) - -def filter_L(Long64_t, x): - return bool(Long64_t > x) - -def filter_l(ULong64_t, x): - return bool(ULong64_t > x) - -def filter_G(Long_t, x): - return bool(Long_t > x) - -def filter_g(ULong_t, x): - return bool(ULong_t > x) - -def filter_O(Bool_t, x): - return bool(x == Bool_t) - -TREE_TYPES = ["String","Char_t", "UChar_t", "Short_t", "UShort_t", "Int_t", "UInt_t", "Float_t", "Float16_t", "Double_t", "Double32_t", "Long64_t", "ULong64_t", "Long_t", "ULong_t", "Bool_t"] -TREE_SYMS = ['C', 'B', 'b', 'S', 's', 'I', 'i', 'F', 'f', 'D', 'd', 'L', 'l', 'G', 'g', 'O'] # 16 Data Types -TYPE_TO_SYMBOL = dict(zip(TREE_TYPES, TREE_SYMS)) - -filter_dict = {} -for i in TREE_SYMS: - filter_dict[i] = eval("filter_" + i) diff --git a/bindings/pyroot/pythonizations/test/roofit/roocmdarg.py b/bindings/pyroot/pythonizations/test/roofit/roocmdarg.py new file mode 100644 index 0000000000000..b5dbe9a45111d --- /dev/null +++ b/bindings/pyroot/pythonizations/test/roofit/roocmdarg.py @@ -0,0 +1,83 @@ +import unittest + +import ROOT + +# Necessary inside the "eval" call +RooArgSet = ROOT.RooArgSet +RooCmdArg = ROOT.RooCmdArg + +x = ROOT.RooRealVar("x", "x", 1.0) +y = ROOT.RooRealVar("y", "y", 2.0) +z = ROOT.RooRealVar("z", "z", 3.0) + + +def args_equal(arg_1, arg_2): + same = True + + same &= str(arg_1.GetName()) == str(arg_2.GetName()) + same &= str(arg_1.GetTitle()) == 
str(arg_2.GetTitle()) + + for i in range(2): + same &= arg_1.getInt(i) == arg_2.getInt(i) + + for i in range(2): + same &= arg_1.getDouble(i) == arg_2.getDouble(i) + + for i in range(3): + same &= str(arg_1.getString(i)) == str(arg_2.getString(i)) + + same &= arg_1.procSubArgs() == arg_2.procSubArgs() + same &= arg_1.prefixSubArgs() == arg_2.prefixSubArgs() + + for i in range(2): + same &= arg_1.getObject(i) == arg_2.getObject(i) + + def set_equal(set_1, set_2): + if set_1 == ROOT.nullptr and set_2 == ROOT.nullptr: + return True + if set_1 == ROOT.nullptr and set_2 != ROOT.nullptr: + return False + if set_1 != ROOT.nullptr and set_2 == ROOT.nullptr: + return False + + if set_1.size() != set_2.size(): + return False + + return set_2.hasSameLayout(set_1) + + for i in range(2): + same &= set_equal(arg_1.getSet(i), arg_2.getSet(i)) + + return same + + +class TestRooArgList(unittest.TestCase): + """ + Test for RooCmdArg pythonizations. + """ + + def test_constructor_eval(self): + + set_1 = ROOT.RooArgSet(x, y) + set_2 = ROOT.RooArgSet(y, z) + + def do_test(*args): + arg_1 = ROOT.RooCmdArg(*args) + + # The arg should be able to recreate itself by emitting the right + # constructor code: + arg_2 = eval(arg_1.constructorCode()) + + self.assertTrue(args_equal(arg_1, arg_2)) + + nullp = ROOT.nullptr + + # only fill the non-object fields: + do_test("Test", -1, 3, 4.2, 4.7, "hello", "world", nullp, nullp, nullp, "s3", nullp, nullp) + + # RooArgSet tests: + do_test("Test", -1, 3, 4.2, 4.7, "hello", "world", nullp, nullp, nullp, "s3", set_1, set_2) + + +if __name__ == "__main__": + unittest.main() diff --git a/bindings/pyroot/pythonizations/test/tfile_context_manager.py b/bindings/pyroot/pythonizations/test/tfile_context_manager.py index cd71125e9edc6..7ab1f1e1b17e8 100644 --- a/bindings/pyroot/pythonizations/test/tfile_context_manager.py +++ b/bindings/pyroot/pythonizations/test/tfile_context_manager.py @@ -15,7 +15,7 @@ class TFileContextManager(unittest.TestCase): XMIN = 10 XMAX = 242 - def check_file_data(self, tfile, filename): + def check_file_data(self, tfile, filename, histoname): """ Check status of the TFile after the context manager and correctness of the data it contains. @@ -24,7 +24,7 @@ def check_file_data(self, tfile, filename): self.assertFalse(tfile.IsOpen()) # And it is correctly closed with TFile(filename, "read") as infile: - hin = infile.Get("myhisto") + hin = infile.Get(histoname) xaxis = hin.GetXaxis() self.assertEqual(self.NBINS, hin.GetNbinsX()) self.assertEqual(self.XMIN, xaxis.GetXmin()) @@ -37,33 +37,36 @@ def test_writeobject(self): Write a histogram in a file within a context manager, using TDirectory::WriteObject. """ filename = "TFileContextManager_test_writeobject.root" + histoname = "myhisto" with TFile(filename, "recreate") as outfile: - hout = ROOT.TH1F("myhisto", "myhisto", self.NBINS, self.XMIN, self.XMAX) + hout = ROOT.TH1F(histoname, histoname, self.NBINS, self.XMIN, self.XMAX) outfile.WriteObject(hout, "myhisto") - self.check_file_data(outfile, filename) + self.check_file_data(outfile, filename, histoname) def test_histowrite(self): """ Write a histogram in a file within a context manager, using TH1::Write. 
""" filename = "TFileContextManager_test_histowrite.root" + histoname = "myhisto_2" with TFile(filename, "recreate") as outfile: - hout = ROOT.TH1F("myhisto", "mhisto", self.NBINS, self.XMIN, self.XMAX) + hout = ROOT.TH1F(histoname, histoname, self.NBINS, self.XMIN, self.XMAX) hout.Write() - self.check_file_data(outfile, filename) + self.check_file_data(outfile, filename, histoname) def test_filewrite(self): """ Write a histogram in a file within a context manager, using TFile::Write. """ filename = "TFileContextManager_test_filewrite.root" + histoname = "myhisto_3" with TFile(filename, "recreate") as outfile: - hout = ROOT.TH1F("myhisto", "myhisto", self.NBINS, self.XMIN, self.XMAX) + hout = ROOT.TH1F(histoname, histoname, self.NBINS, self.XMIN, self.XMAX) outfile.Write() - self.check_file_data(outfile, filename) + self.check_file_data(outfile, filename, histoname) def test_detachhisto(self): """ diff --git a/bindings/tpython/src/TPyClassGenerator.cxx b/bindings/tpython/src/TPyClassGenerator.cxx index 05104aae69344..14629201febc5 100644 --- a/bindings/tpython/src/TPyClassGenerator.cxx +++ b/bindings/tpython/src/TPyClassGenerator.cxx @@ -87,7 +87,12 @@ TClass *TPyClassGenerator::GetClass(const char *name, Bool_t load, Bool_t silent std::string func_name = PyUnicode_AsUTF8(key); // figure out number of variables required +#if PY_VERSION_HEX < 0x30d00f0 PyObject *func_code = PyObject_GetAttrString(attr, (char *)"func_code"); +#else + PyObject *func_code = nullptr; + PyObject_GetOptionalAttrString(attr, (char *)"func_code", &func_code); +#endif PyObject *var_names = func_code ? PyObject_GetAttrString(func_code, (char *)"co_varnames") : NULL; int nVars = var_names ? PyTuple_GET_SIZE(var_names) : 0 /* TODO: probably large number, all default? */; if (nVars < 0) diff --git a/builtins/davix/CMakeLists.txt b/builtins/davix/CMakeLists.txt index 7ac84367df0e7..9f64ec64314cf 100644 --- a/builtins/davix/CMakeLists.txt +++ b/builtins/davix/CMakeLists.txt @@ -10,9 +10,9 @@ find_package(libuuid REQUIRED) find_package(LibXml2 REQUIRED) find_package(OpenSSL REQUIRED) -set(DAVIX_VERSION "0.8.7") +set(DAVIX_VERSION "0.8.7p1") set(DAVIX_URL "http://lcgpackages.web.cern.ch/lcgpackages/tarFiles/sources") -set(DAVIX_URLHASH "SHA256=78c24e14edd7e4e560392d67147ec8658c2aa0d3640415bdf6bc513afcf695e6") +set(DAVIX_URLHASH "SHA256=d4eee9f20aa032893ce488273cc0bfb62bcad8e2a1afa6b260130508eaf3ce54") set(DAVIX_PREFIX ${CMAKE_CURRENT_BINARY_DIR}/DAVIX-prefix) set(DAVIX_LIBNAME ${CMAKE_STATIC_LIBRARY_PREFIX}davix${CMAKE_STATIC_LIBRARY_SUFFIX}) diff --git a/builtins/rendercore/RenderCore-1.5.tar.gz b/builtins/rendercore/RenderCore-1.5.tar.gz deleted file mode 100644 index 929e79c238b7f..0000000000000 Binary files a/builtins/rendercore/RenderCore-1.5.tar.gz and /dev/null differ diff --git a/builtins/rendercore/RenderCore-1.6.tar.gz b/builtins/rendercore/RenderCore-1.6.tar.gz new file mode 100644 index 0000000000000..4f35eee6ff12d Binary files /dev/null and b/builtins/rendercore/RenderCore-1.6.tar.gz differ diff --git a/builtins/xrootd/CMakeLists.txt b/builtins/xrootd/CMakeLists.txt index b8d4731587fd2..c103cf95b8e4d 100644 --- a/builtins/xrootd/CMakeLists.txt +++ b/builtins/xrootd/CMakeLists.txt @@ -6,7 +6,7 @@ include(ExternalProject) -set(XROOTD_VERSION "5.7.1") +set(XROOTD_VERSION "5.7.2") set(XROOTD_PREFIX ${CMAKE_BINARY_DIR}) message(STATUS "Downloading and building XROOTD version ${XROOTD_VERSION}") @@ -25,7 +25,7 @@ list(REMOVE_DUPLICATES XROOTD_UTILS_LIBRARIES) ExternalProject_Add( BUILTIN_XROOTD URL 
http://lcgpackages.web.cern.ch/lcgpackages/tarFiles/sources/xrootd-${XROOTD_VERSION}.tar.gz - URL_HASH SHA256=c28c9dc0a2f5d0134e803981be8b1e8b1c9a6ec13b49f5fa3040889b439f4041 + URL_HASH SHA256=c14c537edc66824ad3ca3c610240f9386c68993cbbcd28473ad3b42c8d14ba67 INSTALL_DIR ${XROOTD_PREFIX} CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH= -DCMAKE_PREFIX_PATH:STRING=${OPENSSL_PREFIX} diff --git a/cmake/modules/RootCPack.cmake b/cmake/modules/RootCPack.cmake index f65ae71b332a2..400ccdde3cd41 100644 --- a/cmake/modules/RootCPack.cmake +++ b/cmake/modules/RootCPack.cmake @@ -84,7 +84,7 @@ if(MSVC) else() message(FATAL_ERROR "MSVC_VERSION ${MSVC_VERSION} not implemented") endif() - set(COMPILER_NAME_VERSION ".vc${VS_VERSION}") + set(COMPILER_NAME_VERSION ".python${Python3_VERSION_MAJOR}${Python3_VERSION_MINOR}.vc${VS_VERSION}") else() if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") set(COMPILER_NAME_VERSION "-gcc${CXX_MAJOR}.${CXX_MINOR}") diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake index bc458b91fdc7f..3faf6090c8a97 100644 --- a/cmake/modules/RootConfiguration.cmake +++ b/cmake/modules/RootConfiguration.cmake @@ -529,12 +529,12 @@ else() set(hashardwareinterferencesize undef) endif() +set(root_canvas_class "TRootCanvas") + if(webgui) - set(root_canvas_class "TWebCanvas") set(root_treeviewer_class "RTreeViewer") set(root_geompainter_type "web") else() - set(root_canvas_class "TRootCanvas") set(root_treeviewer_class "TTreeViewer") set(root_geompainter_type "root") endif() diff --git a/cmake/modules/SearchInstalledSoftware.cmake b/cmake/modules/SearchInstalledSoftware.cmake index 3ba540b6ee68b..509ed53ffba97 100644 --- a/cmake/modules/SearchInstalledSoftware.cmake +++ b/cmake/modules/SearchInstalledSoftware.cmake @@ -668,11 +668,11 @@ if((opengl OR cocoa) AND NOT builtin_glew) find_package(GLEW REQUIRED) else() find_package(GLEW) - # Bug was reported on newer version of CMake on Mac OS X: - # https://gitlab.kitware.com/cmake/cmake/-/issues/19662 - # https://github.com/microsoft/vcpkg/pull/7967 - if(GLEW_FOUND AND APPLE AND CMAKE_VERSION VERSION_GREATER 3.15) - message(FATAL_ERROR "Please enable builtin Glew due bug in latest CMake (use cmake option -Dbuiltin_glew=ON).") + if(GLEW_FOUND AND APPLE AND CMAKE_VERSION VERSION_GREATER 3.15 AND CMAKE_VERSION VERSION_LESS 3.25) + # Bug in CMake on Mac OS X until 3.25: + # https://gitlab.kitware.com/cmake/cmake/-/issues/19662 + # https://github.com/microsoft/vcpkg/pull/7967 + message(FATAL_ERROR "Please enable builtin Glew due to a bug in CMake's FindGlew < v3.25 (use cmake option -Dbuiltin_glew=ON).") unset(GLEW_FOUND) elseif(GLEW_FOUND AND NOT TARGET GLEW::GLEW) add_library(GLEW::GLEW UNKNOWN IMPORTED) @@ -1273,7 +1273,10 @@ if(builtin_tbb) install(DIRECTORY ${CMAKE_BINARY_DIR}/bin/ DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT libraries FILES_MATCHING PATTERN "tbb*.dll") install(DIRECTORY ${CMAKE_BINARY_DIR}/lib/ DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT libraries FILES_MATCHING PATTERN "tbb*.lib") else() - set(TBB_LIBRARIES ${CMAKE_BINARY_DIR}/lib/libtbb${CMAKE_SHARED_LIBRARY_SUFFIX}) + if (CMAKE_BUILD_TYPE STREQUAL "Debug") + set(tbbsuffix "_debug") + endif() + set(TBB_LIBRARIES ${CMAKE_BINARY_DIR}/lib/libtbb${tbbsuffix}${CMAKE_SHARED_LIBRARY_SUFFIX}) install(DIRECTORY ${CMAKE_BINARY_DIR}/lib/ DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT libraries FILES_MATCHING PATTERN "libtbb*") endif() if(tbb_build) @@ -1442,7 +1445,7 @@ if(builtin_veccore) endif() if(builtin_veccore) - set(VecCore_VERSION "0.7.0") + 
set(VecCore_VERSION "0.8.2") set(VecCore_PROJECT "VecCore-${VecCore_VERSION}") set(VecCore_SRC_URI "${lcgpackages}/${VecCore_PROJECT}.tar.gz") set(VecCore_DESTDIR "${CMAKE_BINARY_DIR}/externals") @@ -1450,7 +1453,7 @@ if(builtin_veccore) ExternalProject_Add(VECCORE URL ${VecCore_SRC_URI} - URL_HASH SHA256=61d9fc4be815c5c98088c2796763d3ed82ba4bad5a69b7892c1c2e7e1e53d311 + URL_HASH SHA256=1268bca92acf00acd9775f1e79a2da7b1d902733d17e283e0dd5e02c41ac9666 BUILD_IN_SOURCE 0 LOG_DOWNLOAD 1 LOG_CONFIGURE 1 LOG_BUILD 1 LOG_INSTALL 1 CMAKE_ARGS -G ${CMAKE_GENERATOR} @@ -2010,8 +2013,8 @@ if(webgui) endif() ExternalProject_Add( RENDERCORE - URL ${CMAKE_SOURCE_DIR}/builtins/rendercore/RenderCore-1.5.tar.gz - URL_HASH SHA256=c3f58e952e85308ba62142cba2ae627e6bcfcaa6ec1071e1483d1938d3df4a8e + URL ${CMAKE_SOURCE_DIR}/builtins/rendercore/RenderCore-1.6.tar.gz + URL_HASH SHA256=2fac6bfaef7ae8162091dfda7b2c2cfe3c5cbf841426d948d39deba72d860734 CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" diff --git a/cmake/modules/SetROOTVersion.cmake b/cmake/modules/SetROOTVersion.cmake index fe14f287e5195..a428860d9f22c 100644 --- a/cmake/modules/SetROOTVersion.cmake +++ b/cmake/modules/SetROOTVersion.cmake @@ -22,9 +22,17 @@ function(SET_VERSION_FROM_FILE) string(REGEX MATCH "#define ROOT_VERSION_MAJOR ([0-9]*)" _ ${versionstr}) set(ROOT_MAJOR_VERSION ${CMAKE_MATCH_1}) string(REGEX MATCH "#define ROOT_VERSION_MINOR ([0-9]*)" _ ${versionstr}) - set(ROOT_MINOR_VERSION ${CMAKE_MATCH_1}) + if (CMAKE_MATCH_1 LESS 10) + set(ROOT_MINOR_VERSION "0${CMAKE_MATCH_1}") + else() + set(ROOT_MINOR_VERSION ${CMAKE_MATCH_1}) + endif() string(REGEX MATCH "#define ROOT_VERSION_PATCH ([0-9]*)" _ ${versionstr}) - set(ROOT_PATCH_VERSION ${CMAKE_MATCH_1}) + if (CMAKE_MATCH_1 LESS 10) + set(ROOT_PATCH_VERSION "0${CMAKE_MATCH_1}") + else() + set(ROOT_PATCH_VERSION ${CMAKE_MATCH_1}) + endif() set(ROOT_MAJOR_VERSION "${ROOT_MAJOR_VERSION}" PARENT_SCOPE) set(ROOT_MINOR_VERSION "${ROOT_MINOR_VERSION}" PARENT_SCOPE) diff --git a/config/rootrc.in b/config/rootrc.in index 19551c541a608..f1bfd4c775200 100644 --- a/config/rootrc.in +++ b/config/rootrc.in @@ -255,6 +255,8 @@ WebGui.HttpBind: WebGui.HttpLoopback: yes # Require unique single-time token (key) for connecting with widget (default - yes) WebGui.OnetimeKey: yes +# Only single connection allowed to any web widget +WebGui.SingleConnMode: yes # Use https protocol for the http server (default - no) WebGui.UseHttps: no WebGui.ServerCert: rootserver.pem diff --git a/config/rootssh b/config/rootssh index a8afb94a70609..0ef88feb179c3 100755 --- a/config/rootssh +++ b/config/rootssh @@ -42,25 +42,39 @@ elif [[ "$1" == "--as-listener--" ]] ; then used_browser=$4 flag=1 + NUM=1 - while [ $flag -ne 0 ] ; do + touch $listener_socket.log - line="$(nc -l -U $listener_socket)" + # on MacOS it is not possible to start netcat multiple times with same socket + # therefore run it permanently and redirect output to log file + nc -k -l -U $listener_socket >$listener_socket.log 2>/dev/null & - if [[ "${line:0:5}" == "http:" ]] ; then - remoteport=${line:5} - # echo "Want to map remote port $localport:localhost:$remoteport" - elif [[ "${line:0:7}" == "socket:" ]] ; then - remotesocket=${line:7} - # echo "Remote socket was created $remotesocket" + #remember processid to be able kill it + nc_procid=$! 
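The WebGui.SingleConnMode entry added to rootrc above makes a single client connection per web widget the default. A minimal sketch, assuming the ROOT::RWebWindowsManager::SetSingleConnMode() call used elsewhere in this patch (WebDisplay.md and REveManager.cxx), of how a user program might opt back into multi-client windows; the page name is illustrative:

    #include <ROOT/RWebWindow.hxx>
    #include <ROOT/RWebWindowsManager.hxx>

    void create_shared_window()
    {
       // Opt out of the new single-connection default before creating the window.
       ROOT::RWebWindowsManager::SetSingleConnMode(false);

       auto win = ROOT::RWebWindow::Create();
       win->SetDefaultPage("file:page.html");
       // Allow unlimited client connections, as in the users-guide example below.
       win->SetConnLimit(0);
    }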
+ + # protect socket and log file from reading + chmod 0700 $listener_socket $listener_socket.log + + # remove netcat listening on socket + trap "kill -SIGINT $nc_procid >/dev/null 2>&1; rm -f $listener_socket.log" 0 1 2 3 6 + + while [[ ($flag -ne 0) && (-f $listener_socket.log) ]] ; do + + line=$(sed "${NUM}q;d" $listener_socket.log) + + if [[ "${line}" == "" ]] ; then + sleep 0.2 elif [[ "${line:0:4}" == "win:" ]] ; then + NUM=$((NUM+1)) winurl=${line:4} # echo "Start window http://localhost:$local_port/$winurl" $used_browser "http://localhost:$local_port/$winurl" elif [[ "$line" == "stop" ]] ; then + # echo "Get stop command $line" flag=0 else - echo "Command not recognized $line - stop" + echo "rootssh: got $line - not recoginzed, stop listener" flag=0 fi done @@ -147,7 +161,10 @@ else listener_processid=$! - # start ssh + # by the exit kill listener and remove temporary files + trap "kill -SIGINT $listener_processid > /dev/null 2>&1; rm -f $listener_local $listener_local.log $listener_remote $root_socket" 0 1 2 3 6 + + # starting ssh if [[ "x$ssh_command" == "x" ]] ; then ssh_command="\$SHELL" @@ -156,15 +173,4 @@ else ssh -t -R $listener_remote:$listener_local -L $localport:$root_socket $ssh_destination $ssh_args \ "chmod 0700 $listener_remote; export ROOT_WEBDISPLAY=server; export ROOT_LISTENER_SOCKET=$listener_remote; export ROOT_WEBGUI_SOCKET=$root_socket; $ssh_command; rm -f $listener_remote $root_socket" - # try to stop listener with "stop" message - - echo "stop" | nc -U $listener_local -q 1 >/dev/null 2>&1 - - # Kill listener process - - kill -9 $listener_processid > /dev/null 2>&1 - - # Remove temporary files - - rm -f $listener_local $listener_remote fi diff --git a/core/base/inc/TSystem.h b/core/base/inc/TSystem.h index 72881a775b2b3..ccd1aee4c651b 100644 --- a/core/base/inc/TSystem.h +++ b/core/base/inc/TSystem.h @@ -38,6 +38,16 @@ class TSeqCollection; class TFdSet; class TVirtualMutex; +/*! \enum ESocketBindOption + \brief Options for binging the sockets created + + These values can be used to configure the binding of the opened sockets. 
+*/ +enum ESocketBindOption { + kInaddrAny = 0, ///< Any address for socket binding + kInaddrLoopback = 1, ///< Refers to the local host via the loopback device +}; + enum EAccessMode { kFileExists = 0, kExecutePermission = 1, @@ -501,8 +511,9 @@ class TSystem : public TNamed { virtual int GetServiceByName(const char *service); virtual char *GetServiceByPort(int port); virtual int OpenConnection(const char *server, int port, int tcpwindowsize = -1, const char *protocol = "tcp"); - virtual int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1); - virtual int AnnounceUdpService(int port, int backlog); + virtual int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1, + ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny); + virtual int AnnounceUdpService(int port, int backlog, ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny); virtual int AnnounceUnixService(int port, int backlog); virtual int AnnounceUnixService(const char *sockpath, int backlog); virtual int AcceptConnection(int sock); diff --git a/core/base/src/TColor.cxx b/core/base/src/TColor.cxx index 752dbaee5614d..f53f0bfedac1a 100644 --- a/core/base/src/TColor.cxx +++ b/core/base/src/TColor.cxx @@ -1246,7 +1246,7 @@ void TColor::InitializeColors() new TColor(kP6Red, 228./255., 37./255., 54./255., "kP6Red"); new TColor(kP6Grape, 150./255., 74./255., 139./255., "kP6Grape"); new TColor(kP6Gray, 156./255., 156./255., 161./255., "kP6Gray"); - new TColor(kP6Violet, 112./255., 33./255., 221./255., "kP6Violet"); + new TColor(kP6Violet, 122./255., 33./255., 221./255., "kP6Violet"); new TColor(kP8Blue, 24./255., 69./255., 251./255., "kP8Blue"); new TColor(kP8Orange, 1., 94./255., 2./255., "kP8Orange"); diff --git a/core/base/src/TROOT.cxx b/core/base/src/TROOT.cxx index de68e9de9ba54..d06ea08a0d9bf 100644 --- a/core/base/src/TROOT.cxx +++ b/core/base/src/TROOT.cxx @@ -2833,7 +2833,6 @@ void TROOT::SetWebDisplay(const char *webdisplay) const char *wd = webdisplay ? webdisplay : ""; // store default values to set them back when needed - static TString canName = gEnv->GetValue("Canvas.Name", ""); static TString brName = gEnv->GetValue("Browser.Name", ""); static TString trName = gEnv->GetValue("TreeViewer.Name", ""); static TString geomName = gEnv->GetValue("GeomPainter.Name", ""); @@ -2867,9 +2866,9 @@ void TROOT::SetWebDisplay(const char *webdisplay) } if (fIsWebDisplay) { - // restore canvas and browser classes configured at the moment when gROOT->SetWebDisplay() was called for the first time + // restore browser classes configured at the moment when gROOT->SetWebDisplay() was called for the first time // This is necessary when SetWebDisplay() called several times and therefore current settings may differ - gEnv->SetValue("Canvas.Name", canName); + gEnv->SetValue("Canvas.Name", "TWebCanvas"); gEnv->SetValue("Browser.Name", brName); gEnv->SetValue("TreeViewer.Name", trName); gEnv->SetValue("GeomPainter.Name", geomName); diff --git a/core/base/src/TSystem.cxx b/core/base/src/TSystem.cxx index 7685bd249568f..a3c8bf639b54e 100644 --- a/core/base/src/TSystem.cxx +++ b/core/base/src/TSystem.cxx @@ -2342,7 +2342,7 @@ int TSystem::OpenConnection(const char *, int, int, const char *) //////////////////////////////////////////////////////////////////////////////// /// Announce TCP/IP service. 
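The ESocketBindOption enum and the extended AnnounceTcpService()/AnnounceUdpService() signatures above allow a service to be bound to the loopback device only. A minimal sketch of how the new parameter might be used, with port and backlog values purely illustrative:

    #include "TSystem.h"
    #include <cstdio>

    void announce_local_service()
    {
       // Bind to the loopback device only; ESocketBindOption::kInaddrAny (the default)
       // keeps the previous behaviour of binding to all interfaces.
       int fd = gSystem->AnnounceTcpService(9090, kTRUE, /*backlog=*/10, /*tcpwindowsize=*/-1,
                                            ESocketBindOption::kInaddrLoopback);
       if (fd < 0)
          std::printf("announce failed: %d (-1 socket, -2 bind, -3 listen)\n", fd);
    }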
-int TSystem::AnnounceTcpService(int, Bool_t, int, int) +int TSystem::AnnounceTcpService(int, Bool_t, int, int, ESocketBindOption) { AbstractMethod("AnnounceTcpService"); return -1; @@ -2351,7 +2351,7 @@ int TSystem::AnnounceTcpService(int, Bool_t, int, int) //////////////////////////////////////////////////////////////////////////////// /// Announce UDP service. -int TSystem::AnnounceUdpService(int, int) +int TSystem::AnnounceUdpService(int, int, ESocketBindOption) { AbstractMethod("AnnounceUdpService"); return -1; diff --git a/core/clingutils/src/TClingUtils.cxx b/core/clingutils/src/TClingUtils.cxx index 58d58dadd47a4..3746617a150be 100644 --- a/core/clingutils/src/TClingUtils.cxx +++ b/core/clingutils/src/TClingUtils.cxx @@ -22,6 +22,7 @@ #include #include #include +#include #include "RConfigure.h" #include @@ -3313,10 +3314,9 @@ void ROOT::TMetaUtils::GetCppName(std::string &out, const char *in) out.push_back(c); } - // Remove initial numbers if any - auto firstNonNumber = out.find_first_not_of("0123456789"); - if (firstNonNumber != std::string::npos) - out.replace(0,firstNonNumber,""); + // If out is empty, or if it starts with a number, it's not a valid C++ variable. Prepend a "_" + if (out.empty() || isdigit(out[0])) + out.insert(out.begin(), '_'); } static clang::SourceLocation diff --git a/core/cont/inc/TList.h b/core/cont/inc/TList.h index e9b432605c836..a86af707c175c 100644 --- a/core/cont/inc/TList.h +++ b/core/cont/inc/TList.h @@ -71,8 +71,6 @@ friend class TListIter; TList() : fAscending(kTRUE) { } - TList(TObject *) R__DEPRECATED(6, 34, "The argument is ignored. Use the default constructor TList::TList().") : fAscending(kTRUE) { } // for backward compatibility, don't use - virtual ~TList(); void Clear(Option_t *option="") override; void Delete(Option_t *option="") override; diff --git a/core/foundation/inc/ROOT/RConfig.hxx b/core/foundation/inc/ROOT/RConfig.hxx index edd7305dedb5e..2b3af250e7968 100644 --- a/core/foundation/inc/ROOT/RConfig.hxx +++ b/core/foundation/inc/ROOT/RConfig.hxx @@ -491,6 +491,14 @@ # define _R__DEPRECATED_636(REASON) _R_DEPRECATED_REMOVE_NOW(REASON) #endif +/* USE AS `R__DEPRECATED(6,38, "Not threadsafe; use TFoo::Bar().")` + To be removed by 6.38 */ +#if ROOT_VERSION_CODE <= ROOT_VERSION(6,37,0) +# define _R__DEPRECATED_638(REASON) _R__DEPRECATED_LATER(REASON) +#else +# define _R__DEPRECATED_638(REASON) _R_DEPRECATED_REMOVE_NOW(REASON) +#endif + /* USE AS `R__DEPRECATED(7,00, "Not threadsafe; use TFoo::Bar().")` To be removed by 7.00 */ #if ROOT_VERSION_CODE < ROOT_VERSION(6,99,0) diff --git a/core/foundation/inc/ROOT/RVersion.hxx b/core/foundation/inc/ROOT/RVersion.hxx index 6179eedb84a28..d97c45ceace2e 100644 --- a/core/foundation/inc/ROOT/RVersion.hxx +++ b/core/foundation/inc/ROOT/RVersion.hxx @@ -3,9 +3,9 @@ /* Update on release: */ #define ROOT_VERSION_MAJOR 6 -#define ROOT_VERSION_MINOR 33 -#define ROOT_VERSION_PATCH 01 -#define ROOT_RELEASE_DATE "Oct 10 2023" +#define ROOT_VERSION_MINOR 34 +#define ROOT_VERSION_PATCH 4 +#define ROOT_RELEASE_DATE "Feb 10 2025" /* Don't change the lines below. */ @@ -23,11 +23,26 @@ #define ROOT_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) #define ROOT_VERSION_CODE ROOT_VERSION(ROOT_VERSION_MAJOR, ROOT_VERSION_MINOR, ROOT_VERSION_PATCH) -#define R__VERS_QUOTE1(P) #P -#define R__VERS_QUOTE(P) R__VERS_QUOTE1(P) +#define R__VERS_QUOTE1_MAJOR(P) #P +#define R__VERS_QUOTE_MAJOR(P) R__VERS_QUOTE1_MAJOR(P) -#define ROOT_RELEASE R__VERS_QUOTE(ROOT_VERSION_MAJOR) \ - "." R__VERS_QUOTE(ROOT_VERSION_MINOR) \ - "." 
R__VERS_QUOTE(ROOT_VERSION_PATCH) + +#if ROOT_VERSION_MINOR < 10 +#define R__VERS_QUOTE1_MINOR(P) "0" #P +#else +#define R__VERS_QUOTE1_MINOR(P) #P +#endif +#define R__VERS_QUOTE_MINOR(P) R__VERS_QUOTE1_MINOR(P) + +#if ROOT_VERSION_PATCH < 10 +#define R__VERS_QUOTE1_PATCH(P) "0" #P +#else +#define R__VERS_QUOTE1_PATCH(P) #P +#endif +#define R__VERS_QUOTE_PATCH(P) R__VERS_QUOTE1_PATCH(P) + +#define ROOT_RELEASE R__VERS_QUOTE_MAJOR(ROOT_VERSION_MAJOR) \ + "." R__VERS_QUOTE_MINOR(ROOT_VERSION_MINOR) \ + "." R__VERS_QUOTE_PATCH(ROOT_VERSION_PATCH) #endif // ROOT_RVERSION_H diff --git a/core/meta/inc/TClass.h b/core/meta/inc/TClass.h index ff250aae7ef5a..e8c1a16339d47 100644 --- a/core/meta/inc/TClass.h +++ b/core/meta/inc/TClass.h @@ -307,7 +307,7 @@ friend class TStreamerInfo; void SetClassSize(Int_t sizof) { fSizeof = sizof; } TVirtualStreamerInfo* DetermineCurrentStreamerInfo(); - void SetStreamerImpl(); + void SetStreamerImpl(Int_t streamerType); void SetRuntimeProperties(); diff --git a/core/meta/src/TClass.cxx b/core/meta/src/TClass.cxx index d7658a38027e1..817e6ce4d7a47 100644 --- a/core/meta/src/TClass.cxx +++ b/core/meta/src/TClass.cxx @@ -751,7 +751,9 @@ void TDumpMembers::Inspect(TClass *cl, const char *pname, const char *mname, con line[kvalue] = 0; } } else { - strncpy(&line[kvalue], membertype->AsString(p3pointer), TMath::Min(kline-1-kvalue,(int)strlen(membertype->AsString(p3pointer)))); + line[kvalue] = '-'; + line[kvalue+1] = '>'; + strncpy(&line[kvalue+2], membertype->AsString(p3pointer), TMath::Min(kline-1-kvalue-2,(int)strlen(membertype->AsString(p3pointer)))); } } else if (!strcmp(memberFullTypeName, "char*") || !strcmp(memberFullTypeName, "const char*")) { @@ -6166,14 +6168,21 @@ Long_t TClass::Property() const // Avoid asking about the class when it is still building if (TestBit(kLoading)) return fProperty; + if (fStreamerType != kDefault && !HasInterpreterInfo()) { + // We have no interpreter information but we already set the streamer type + // so we have already been here and have no new information, then let's + // give up. See the code at this end of this routine (else branch of the + // `if (HasInterpreterInfo()` for the path we took before. + return 0; + } + // When called via TMapFile (e.g. Update()) make sure that the dictionary // gets allocated on the heap and not in the mapped file. 
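The reworked quoting macros above zero-pad the minor and patch numbers when they are below 10, so the release string keeps a fixed width. A small sketch of what this yields with the values set in RVersion.hxx by this patch:

    #include <ROOT/RVersion.hxx>
    #include <cstdio>

    int main()
    {
       // With ROOT_VERSION_MAJOR 6, ROOT_VERSION_MINOR 34 and ROOT_VERSION_PATCH 4
       // as set above, ROOT_RELEASE expands to "6.34.04" (patch padded because 4 < 10).
       std::printf("release: %s\n", ROOT_RELEASE);

       // Numeric comparisons through ROOT_VERSION_CODE are unchanged.
    #if ROOT_VERSION_CODE >= ROOT_VERSION(6, 34, 0)
       std::printf("version code: %d\n", ROOT_VERSION_CODE);
    #endif
       return 0;
    }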
TMmallocDescTemp setreset; TClass *kl = const_cast(this); - kl->fStreamerType = TClass::kDefault; - kl->fStreamerImpl = &TClass::StreamerDefault; + Int_t streamerType = TClass::kDefault; if (InheritsFrom(TObject::Class())) { kl->SetBit(kIsTObject); @@ -6182,8 +6191,7 @@ Long_t TClass::Property() const Int_t delta = kl->GetBaseClassOffsetRecurse(TObject::Class()); if (delta==0) kl->SetBit(kStartWithTObject); - kl->fStreamerType = kTObject; - kl->fStreamerImpl = &TClass::StreamerTObject; + streamerType = kTObject; } if (HasInterpreterInfo()) { @@ -6195,33 +6203,30 @@ Long_t TClass::Property() const if (!const_cast(this)->GetClassMethodWithPrototype("Streamer","TBuffer&",kFALSE)) { kl->SetBit(kIsForeign); - kl->fStreamerType = kForeign; - kl->fStreamerImpl = &TClass::StreamerStreamerInfo; + streamerType = kForeign; - } else if ( kl->fStreamerType == TClass::kDefault ) { + } else if (streamerType == TClass::kDefault) { if (kl->fConvStreamerFunc) { - kl->fStreamerType = kInstrumented; - kl->fStreamerImpl = &TClass::ConvStreamerInstrumented; + streamerType = kInstrumented; } else if (kl->fStreamerFunc) { - kl->fStreamerType = kInstrumented; - kl->fStreamerImpl = &TClass::StreamerInstrumented; + streamerType = kInstrumented; } else { // We have an automatic streamer using the StreamerInfo .. no need to go through the // Streamer method function itself. - kl->fStreamerType = kInstrumented; - kl->fStreamerImpl = &TClass::StreamerStreamerInfo; + streamerType = kInstrumented; } } if (fStreamer) { - kl->fStreamerType = kExternal; - kl->fStreamerImpl = &TClass::StreamerExternal; + streamerType = kExternal; } if (const_cast(this)->GetClassMethodWithPrototype("Hash", "", kTRUE)) { kl->SetBit(kHasLocalHashMember); } + kl->SetStreamerImpl(streamerType); + if (GetClassInfo()) { // In the case where the TClass for one of ROOT's core class // (eg TClonesArray for map) is requested @@ -6236,15 +6241,16 @@ Long_t TClass::Property() const // and think all test bits have been properly set. kl->fProperty = gCling->ClassInfo_Property(fClassInfo); } + } else { if (fStreamer) { - kl->fStreamerType = kExternal; - kl->fStreamerImpl = &TClass::StreamerExternal; + streamerType = kExternal; } - kl->fStreamerType |= kEmulatedStreamer; - kl->SetStreamerImpl(); + streamerType |= kEmulatedStreamer; + + kl->SetStreamerImpl(streamerType); // fProperty was *not* set so that it can be forced to be recalculated // next time. return 0; @@ -6279,8 +6285,9 @@ void TClass::SetRuntimeProperties() /// Internal routine to set fStreamerImpl based on the value of /// fStreamerType. -void TClass::SetStreamerImpl() +void TClass::SetStreamerImpl(Int_t StreamerType) { + fStreamerType = StreamerType; switch (fStreamerType) { case kTObject: fStreamerImpl = &TClass::StreamerTObject; break; case kForeign: fStreamerImpl = &TClass::StreamerStreamerInfo; break; diff --git a/core/meta/src/TProtoClass.cxx b/core/meta/src/TProtoClass.cxx index 26674c1cedd79..1c53d2c73f256 100644 --- a/core/meta/src/TProtoClass.cxx +++ b/core/meta/src/TProtoClass.cxx @@ -304,7 +304,6 @@ Bool_t TProtoClass::FillTClass(TClass* cl) { cl->fCanSplit = fCanSplit; cl->fProperty = fProperty; cl->fClassProperty = fClassProperty; - cl->fStreamerType = fStreamerType; // Update pointers to TClass if (cl->fBase.load()) { @@ -405,7 +404,7 @@ Bool_t TProtoClass::FillTClass(TClass* cl) { cl->fRealData = new TList(); // FIXME: this should really become a THashList! 
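The new _R__DEPRECATED_638 branch in RConfig.hxx above extends the deprecation scheme to the 6.38 cycle. A hedged sketch of how a header could use it, following the R__DEPRECATED(6, 36, ...) pattern updated elsewhere in this patch; TOldThing and its methods are made-up names:

    #include <ROOT/RConfig.hxx>

    class TOldThing {
    public:
       // Warns at compile time until the 6.38 development cycle opens, after which
       // the macro switches to the "remove now" branch added above.
       void LegacyCall() R__DEPRECATED(6, 38, "Not maintained anymore; use TOldThing::NewCall().");
       void NewCall();
    };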
} - cl->SetStreamerImpl(); + cl->SetStreamerImpl(fStreamerType); // set to zero in order not to delete when protoclass is deleted fBase = nullptr; diff --git a/core/testsupport/src/TestSupport.cxx b/core/testsupport/src/TestSupport.cxx index 402f57e83a820..9889a432f408f 100644 --- a/core/testsupport/src/TestSupport.cxx +++ b/core/testsupport/src/TestSupport.cxx @@ -57,16 +57,6 @@ static struct ForbidDiagnostics { } // FIXME: RNTuple warns that it's in beta stage. - if (level == kWarning - && strstr(msg, "The RNTuple file format will change. Do not store real data with this version of RNTuple!") != nullptr) { - std::cerr << "Warning in " << location << " " << msg << std::endl; - return; - } - if (level == kWarning - && strstr(msg, "Pre-release format version: RC ") != nullptr) { - std::cerr << "Warning in " << location << " " << msg << std::endl; - return; - } if (level == kWarning && strstr(msg, "Merging RNTuples is experimental") != nullptr) { std::cerr << "Warning in " << location << " " << msg << std::endl; return; diff --git a/core/unix/inc/TUnixSystem.h b/core/unix/inc/TUnixSystem.h index 35b24f3482fa7..9f89288d96de1 100644 --- a/core/unix/inc/TUnixSystem.h +++ b/core/unix/inc/TUnixSystem.h @@ -62,8 +62,8 @@ class TUnixSystem : public TSystem { static int UnixUnixConnect(int port); static int UnixUnixConnect(const char *path); static int UnixTcpService(int port, Bool_t reuse, int backlog, - int tcpwindowsize); - static int UnixUdpService(int port, int backlog); + int tcpwindowsize, ESocketBindOption socketBindOption); + static int UnixUdpService(int port, int backlog, ESocketBindOption socketBindOption); static int UnixUnixService(int port, int backlog); static int UnixUnixService(const char *sockpath, int backlog); static int UnixRecv(int sock, void *buf, int len, int flag); @@ -197,8 +197,8 @@ class TUnixSystem : public TSystem { char *GetServiceByPort(int port) override; int ConnectService(const char *server, int port, int tcpwindowsize, const char *protocol = "tcp"); int OpenConnection(const char *server, int port, int tcpwindowsize = -1, const char *protocol = "tcp") override; - int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1) override; - int AnnounceUdpService(int port, int backlog) override; + int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1, ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny) override; + int AnnounceUdpService(int port, int backlog, ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny) override; int AnnounceUnixService(int port, int backlog) override; int AnnounceUnixService(const char *sockpath, int backlog) override; int AcceptConnection(int sock) override; diff --git a/core/unix/src/TUnixSystem.cxx b/core/unix/src/TUnixSystem.cxx index 83be91c61df09..4f6f8c5e45602 100644 --- a/core/unix/src/TUnixSystem.cxx +++ b/core/unix/src/TUnixSystem.cxx @@ -3239,17 +3239,17 @@ int TUnixSystem::OpenConnection(const char *server, int port, int tcpwindowsize, /// or -3 if listen() failed. int TUnixSystem::AnnounceTcpService(int port, Bool_t reuse, int backlog, - int tcpwindowsize) + int tcpwindowsize, ESocketBindOption socketBindOption) { - return UnixTcpService(port, reuse, backlog, tcpwindowsize); + return UnixTcpService(port, reuse, backlog, tcpwindowsize, socketBindOption); } //////////////////////////////////////////////////////////////////////////////// /// Announce UDP service. 
-int TUnixSystem::AnnounceUdpService(int port, int backlog) +int TUnixSystem::AnnounceUdpService(int port, int backlog, ESocketBindOption socketBindOption) { - return UnixUdpService(port, backlog); + return UnixUdpService(port, backlog, socketBindOption); } //////////////////////////////////////////////////////////////////////////////// @@ -4291,11 +4291,13 @@ int TUnixSystem::UnixUnixConnect(const char *sockpath) /// Use tcpwindowsize to specify the size of the receive buffer, it has /// to be specified here to make sure the window scale option is set (for /// tcpwindowsize > 65KB and for platforms supporting window scaling). +/// The socketBindOption parameter allows to specify how the socket will be +/// bound. See the documentation of ESocketBindOption for the details. /// Returns socket fd or -1 if socket() failed, -2 if bind() failed /// or -3 if listen() failed. int TUnixSystem::UnixTcpService(int port, Bool_t reuse, int backlog, - int tcpwindowsize) + int tcpwindowsize, ESocketBindOption socketBindOption) { const short kSOCKET_MINPORT = 5000, kSOCKET_MAXPORT = 15000; short sport, tryport = kSOCKET_MINPORT; @@ -4329,7 +4331,7 @@ int TUnixSystem::UnixTcpService(int port, Bool_t reuse, int backlog, struct sockaddr_in inserver; memset(&inserver, 0, sizeof(inserver)); inserver.sin_family = AF_INET; - inserver.sin_addr.s_addr = htonl(INADDR_ANY); + inserver.sin_addr.s_addr = socketBindOption == ESocketBindOption::kInaddrAny ? htonl(INADDR_ANY) : htonl(INADDR_LOOPBACK); inserver.sin_port = sport; // Bind socket @@ -4369,8 +4371,10 @@ int TUnixSystem::UnixTcpService(int port, Bool_t reuse, int backlog, /// how many sockets can be waiting to be accepted. If port is 0 a port /// scan will be done to find a free port. This option is mutual exlusive /// with the reuse option. +/// The socketBindOption parameter allows to specify how the socket will be +/// bound. See the documentation of ESocketBindOption for the details. -int TUnixSystem::UnixUdpService(int port, int backlog) +int TUnixSystem::UnixUdpService(int port, int backlog, ESocketBindOption socketBindOption) { const short kSOCKET_MINPORT = 5000, kSOCKET_MAXPORT = 15000; short sport, tryport = kSOCKET_MINPORT; @@ -4391,7 +4395,7 @@ int TUnixSystem::UnixUdpService(int port, int backlog) struct sockaddr_in inserver; memset(&inserver, 0, sizeof(inserver)); inserver.sin_family = AF_INET; - inserver.sin_addr.s_addr = htonl(INADDR_ANY); + inserver.sin_addr.s_addr = socketBindOption == ESocketBindOption::kInaddrAny ? 
htonl(INADDR_ANY) : htonl(INADDR_LOOPBACK); inserver.sin_port = sport; // Bind socket diff --git a/core/winnt/inc/TWinNTSystem.h b/core/winnt/inc/TWinNTSystem.h index 675bb3f5c743c..59c5031336244 100644 --- a/core/winnt/inc/TWinNTSystem.h +++ b/core/winnt/inc/TWinNTSystem.h @@ -232,8 +232,8 @@ class TWinNTSystem : public TSystem { int GetServiceByName(const char *service) override; char *GetServiceByPort(int port) override; int OpenConnection(const char *server, int port, int tcpwindowsize = -1, const char *protocol = "tcp") override; - int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1) override; - int AnnounceUdpService(int port, int backlog) override; + int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1, ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny) override; + int AnnounceUdpService(int port, int backlog, ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny) override; int AnnounceUnixService(int port, int backlog) override; int AnnounceUnixService(const char *sockpath, int backlog) override; int AcceptConnection(int sock) override; diff --git a/core/winnt/src/TWinNTSystem.cxx b/core/winnt/src/TWinNTSystem.cxx index 91071a4c95cf9..b47c594d26831 100644 --- a/core/winnt/src/TWinNTSystem.cxx +++ b/core/winnt/src/TWinNTSystem.cxx @@ -5377,11 +5377,13 @@ int TWinNTSystem::OpenConnection(const char *server, int port, int tcpwindowsize /// Use tcpwindowsize to specify the size of the receive buffer, it has /// to be specified here to make sure the window scale option is set (for /// tcpwindowsize > 65KB and for platforms supporting window scaling). +/// The socketBindOption parameter allows to specify how the socket will be +/// bound. See the documentation of ESocketBindOption for the details. /// Returns socket fd or -1 if socket() failed, -2 if bind() failed /// or -3 if listen() failed. int TWinNTSystem::AnnounceTcpService(int port, Bool_t reuse, int backlog, - int tcpwindowsize) + int tcpwindowsize, ESocketBindOption socketBindOption) { short sport; struct servent *sp; @@ -5424,7 +5426,7 @@ int TWinNTSystem::AnnounceTcpService(int port, Bool_t reuse, int backlog, struct sockaddr_in inserver; memset(&inserver, 0, sizeof(inserver)); inserver.sin_family = AF_INET; - inserver.sin_addr.s_addr = ::htonl(INADDR_ANY); + inserver.sin_addr.s_addr = socketBindOption == ESocketBindOption::kInaddrAny ? ::htonl(INADDR_ANY) : ::htonl(INADDR_LOOPBACK); inserver.sin_port = sport; // Bind socket @@ -5458,13 +5460,15 @@ int TWinNTSystem::AnnounceTcpService(int port, Bool_t reuse, int backlog, //////////////////////////////////////////////////////////////////////////////// /// Announce UDP service. -int TWinNTSystem::AnnounceUdpService(int port, int backlog) +int TWinNTSystem::AnnounceUdpService(int port, int backlog, ESocketBindOption socketBindOption) { // Open a socket, bind to it and start listening for UDP connections // on the port. If reuse is true reuse the address, backlog specifies // how many sockets can be waiting to be accepted. If port is 0 a port // scan will be done to find a free port. This option is mutual exlusive // with the reuse option. + // The socketBindOption parameter allows to specify how the socket will be + // bound. See the documentation of ESocketBindOption for the details. 
const short kSOCKET_MINPORT = 5000, kSOCKET_MAXPORT = 15000; short sport, tryport = kSOCKET_MINPORT; @@ -5485,7 +5489,7 @@ int TWinNTSystem::AnnounceUdpService(int port, int backlog) struct sockaddr_in inserver; memset(&inserver, 0, sizeof(inserver)); inserver.sin_family = AF_INET; - inserver.sin_addr.s_addr = htonl(INADDR_ANY); + inserver.sin_addr.s_addr = socketBindOption == ESocketBindOption::kInaddrAny ? htonl(INADDR_ANY) : htonl(INADDR_LOOPBACK); inserver.sin_port = sport; // Bind socket diff --git a/core/zip/inc/Compression.h b/core/zip/inc/Compression.h index 463d10baad2d3..0eb9bee10671a 100644 --- a/core/zip/inc/Compression.h +++ b/core/zip/inc/Compression.h @@ -114,7 +114,7 @@ struct RCompressionSetting { }; // clang-format off -enum R__DEPRECATED(6, 34, "Use RCompressionSetting::EAlgorithm instead") ECompressionAlgorithm { +enum R__DEPRECATED(6, 36, "Use RCompressionSetting::EAlgorithm instead") ECompressionAlgorithm { kUseGlobalCompressionSetting = static_cast(RCompressionSetting::EAlgorithm::kUseGlobal), kUseGlobalSetting = static_cast(RCompressionSetting::EAlgorithm::kUseGlobal), kZLIB = static_cast(RCompressionSetting::EAlgorithm::kZLIB), @@ -128,7 +128,7 @@ enum R__DEPRECATED(6, 34, "Use RCompressionSetting::EAlgorithm instead") ECompre int CompressionSettings(RCompressionSetting::EAlgorithm::EValues algorithm, int compressionLevel); int CompressionSettings(ROOT::ECompressionAlgorithm algorithm, int compressionLevel) - R__DEPRECATED(6, 34, "Use the overload accepting RCompressionSetting::EAlgorithm instead"); + R__DEPRECATED(6, 36, "Use the overload accepting RCompressionSetting::EAlgorithm instead"); // clang-format on } // namespace ROOT diff --git a/core/zip/inc/RZip.h b/core/zip/inc/RZip.h index 043446f2d165c..a3433b56c5b8d 100644 --- a/core/zip/inc/RZip.h +++ b/core/zip/inc/RZip.h @@ -33,7 +33,7 @@ extern "C" ROOT::RCompressionSetting::EAlgorithm::EValues R__getCompressionAlgor * R__zipMultipleAlgorithm instead. */ extern "C" void R__zip(int cxlevel, int *srcsize, char *src, int *tgtsize, char *tgt, int *irep) - R__DEPRECATED(6, 34, "use R__zipMultipleAlgorithm instead"); + R__DEPRECATED(6, 36, "use R__zipMultipleAlgorithm instead"); extern "C" void R__unzip(int *srcsize, unsigned char *src, int *tgtsize, unsigned char *tgt, int *irep); diff --git a/documentation/doxygen/Doxyfile b/documentation/doxygen/Doxyfile index 56b40b16c1d8e..9ed9156310359 100644 --- a/documentation/doxygen/Doxyfile +++ b/documentation/doxygen/Doxyfile @@ -1594,7 +1594,7 @@ SITEMAP_URL = # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. -GENERATE_QHP = YES +GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify # the file name of the resulting .qch file. The path specified is relative to @@ -1649,7 +1649,7 @@ QHP_SECT_FILTER_ATTRS = # run qhelpgenerator on the generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. -QHG_LOCATION = qhelpgenerator +QHG_LOCATION = qhelpgenerator-qt5 # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be # generated, together with the HTML files, they form an Eclipse help plugin. 
To diff --git a/documentation/doxygen/DoxygenLayout.xml b/documentation/doxygen/DoxygenLayout.xml index dc37f2e2ef8a3..3b2b0eb707ff7 100644 --- a/documentation/doxygen/DoxygenLayout.xml +++ b/documentation/doxygen/DoxygenLayout.xml @@ -1,10 +1,10 @@ - + - + diff --git a/documentation/doxygen/Makefile b/documentation/doxygen/Makefile index fdf6add7b0e47..69e7749f5ba50 100644 --- a/documentation/doxygen/Makefile +++ b/documentation/doxygen/Makefile @@ -65,7 +65,6 @@ doxygen: filter pyzdoc doxygen bash ./CleanNamespaces.sh gzip $(DOXYGEN_IMAGE_PATH)/ROOT.tag - gzip $(DOXYGEN_IMAGE_PATH)/ROOT.qch rm -rf files c1* *.ps *.eps *.png *.jpg *.tex *.svg *.pdf *.root *.xpm *.out *.dat *.dtd *.dot *.txt *.csv *.log *.rs rm -rf listofclass.sh tmva* data* result* config* test* Roo* My* Freq* rm -f Doxyfile_INPUT filter htmlfooter.html MDF.C pca.C diff --git a/documentation/users-guide/WebDisplay.md b/documentation/users-guide/WebDisplay.md index c84a58f1b09d4..072a615661025 100644 --- a/documentation/users-guide/WebDisplay.md +++ b/documentation/users-guide/WebDisplay.md @@ -20,7 +20,8 @@ auto win = ROOT::RWebWindow::Create(); // set HTML page which is showed when window displayed win->SetDefaultPage("file:page.html"); // set -// allow unlimitted user connections to the window (default only 1) +// allow unlimited user connections to the window (default only 1) +ROOT::RWebWindowsManager::SetSingleConnMode(false); win->SetConnLimit(0); // configure predefined geometry diff --git a/geom/geom/inc/TGeoBBox.h b/geom/geom/inc/TGeoBBox.h index a6f90a414ee00..9a95d0d8af11b 100644 --- a/geom/geom/inc/TGeoBBox.h +++ b/geom/geom/inc/TGeoBBox.h @@ -38,7 +38,7 @@ class TGeoBBox : public TGeoShape { // methods static Bool_t AreOverlapping(const TGeoBBox *box1, const TGeoMatrix *mat1, const TGeoBBox *box2, const TGeoMatrix *mat2) - R__DEPRECATED(6, 34, "DEPRECATED, DO NOT USE ! The overlap detection does not work for all cases"); + R__DEPRECATED(6, 36, "DEPRECATED, DO NOT USE ! The overlap detection does not work for all cases"); Double_t Capacity() const override; void ComputeBBox() override; diff --git a/geom/geom/inc/bvh/v2/bvh.h b/geom/geom/inc/bvh/v2/bvh.h index bbf7c117530fd..1c4f04ecda9b3 100644 --- a/geom/geom/inc/bvh/v2/bvh.h +++ b/geom/geom/inc/bvh/v2/bvh.h @@ -146,10 +146,12 @@ void Bvh::traverse_top_down(Index start, Stack& stack, LeafFn&& leaf_fn, I stack.push(far_index); } top = near_index; - } else if (hit_right) + } else if (hit_right) { top = right.index; - else [[unlikely]] + } + else [[unlikely]] { goto restart; + } } [[maybe_unused]] auto was_hit = leaf_fn(top.first_id(), top.first_id() + top.prim_count()); @@ -163,19 +165,18 @@ template template void Bvh::intersect(const Ray& ray, Index start, Stack& stack, LeafFn&& leaf_fn, InnerFn&& inner_fn) const { auto inv_dir = ray.template get_inv_dir(); - auto inv_org = -inv_dir * ray.org; - auto inv_dir_pad = ray.pad_inv_dir(inv_dir); + auto inv_dir_pad_or_inv_org = IsRobust ? 
ray.pad_inv_dir(inv_dir) : -inv_dir * ray.org; auto octant = ray.get_octant(); traverse_top_down(start, stack, leaf_fn, [&] (const Node& left, const Node& right) { inner_fn(left, right); std::pair intr_left, intr_right; if constexpr (IsRobust) { - intr_left = left .intersect_robust(ray, inv_dir, inv_dir_pad, octant); - intr_right = right.intersect_robust(ray, inv_dir, inv_dir_pad, octant); + intr_left = left .intersect_robust(ray, inv_dir, inv_dir_pad_or_inv_org, octant); + intr_right = right.intersect_robust(ray, inv_dir, inv_dir_pad_or_inv_org, octant); } else { - intr_left = left .intersect_fast(ray, inv_dir, inv_org, octant); - intr_right = right.intersect_fast(ray, inv_dir, inv_org, octant); + intr_left = left .intersect_fast(ray, inv_dir, inv_dir_pad_or_inv_org, octant); + intr_right = right.intersect_fast(ray, inv_dir, inv_dir_pad_or_inv_org, octant); } return std::make_tuple( intr_left.first <= intr_left.second, diff --git a/geom/geom/src/TGeoManager.cxx b/geom/geom/src/TGeoManager.cxx index c79e072100e2b..a38f22e05f320 100644 --- a/geom/geom/src/TGeoManager.cxx +++ b/geom/geom/src/TGeoManager.cxx @@ -2916,9 +2916,15 @@ Int_t TGeoManager::GetByteCount(Option_t * /*option*/) TVirtualGeoPainter *TGeoManager::GetGeomPainter() { if (!fPainter) { - const char *kind = gEnv->GetValue("GeomPainter.Name", ""); + const char *kind = nullptr; + if (gPad) + kind = gPad->IsWeb() ? "web" : "root"; + else + kind = gEnv->GetValue("GeomPainter.Name", ""); + if (!kind || !*kind) kind = (gROOT->IsWebDisplay() && !gROOT->IsWebDisplayBatch()) ? "web" : "root"; + if (auto h = gROOT->GetPluginManager()->FindHandler("TVirtualGeoPainter", kind)) { if (h->LoadPlugin() == -1) { Error("GetGeomPainter", "could not load plugin for %s geo_painter", kind); diff --git a/graf2d/gpad/src/TPad.cxx b/graf2d/gpad/src/TPad.cxx index 7cd2e1b343e96..8fd260481c50c 100644 --- a/graf2d/gpad/src/TPad.cxx +++ b/graf2d/gpad/src/TPad.cxx @@ -3435,8 +3435,8 @@ void TPad::FillCollideGridTH1(TObject *o) else y2l = fUymin; } y2 = (Int_t)((y2l-fY1)/ys); - for (j=y1; j<=y2; j++) { - NotFree(x1, j); + for (j=y1; jGetBinLowEdge(i); diff --git a/graf2d/gpadv7/inc/ROOT/RVirtualCanvasPainter.hxx b/graf2d/gpadv7/inc/ROOT/RVirtualCanvasPainter.hxx index be2a467280bc9..f5f255ec485e1 100644 --- a/graf2d/gpadv7/inc/ROOT/RVirtualCanvasPainter.hxx +++ b/graf2d/gpadv7/inc/ROOT/RVirtualCanvasPainter.hxx @@ -9,6 +9,7 @@ #ifndef ROOT7_RVirtualCanvasPainter #define ROOT7_RVirtualCanvasPainter +#include #include #include #include diff --git a/graf3d/eve7/inc/ROOT/REveChunkManager.hxx b/graf3d/eve7/inc/ROOT/REveChunkManager.hxx index 538884ee4adad..a03c481f66cd8 100644 --- a/graf3d/eve7/inc/ROOT/REveChunkManager.hxx +++ b/graf3d/eve7/inc/ROOT/REveChunkManager.hxx @@ -17,6 +17,7 @@ #include "TArrayC.h" #include +#include namespace ROOT { namespace Experimental { diff --git a/graf3d/eve7/inc/ROOT/REveElement.hxx b/graf3d/eve7/inc/ROOT/REveElement.hxx index 0f9966a138b59..b81866194237f 100644 --- a/graf3d/eve7/inc/ROOT/REveElement.hxx +++ b/graf3d/eve7/inc/ROOT/REveElement.hxx @@ -15,6 +15,7 @@ #include #include #include +#include "TString.h" #include #include @@ -339,6 +340,14 @@ protected: UChar_t fChangeBits{0}; //! Char_t fDestructing{kNone}; //! 
+ static thread_local REveElement *stlMirAlpha; + static thread_local int stlMirError; + static thread_local std::string stlMirErrorString; + static void ClearMirContext(); + static void SetMirContext(REveElement *el); + static void SetMirError(int error, std::string_view err_str=""); + static void AppendMirErrorString(std::string_view err_str); + public: void StampColorSelection() { AddStamp(kCBColorSelection); } void StampTransBBox() { AddStamp(kCBTransBBox); } diff --git a/graf3d/eve7/inc/ROOT/REveGeoShape.hxx b/graf3d/eve7/inc/ROOT/REveGeoShape.hxx index 894787b1dea15..5f651b92488c3 100644 --- a/graf3d/eve7/inc/ROOT/REveGeoShape.hxx +++ b/graf3d/eve7/inc/ROOT/REveGeoShape.hxx @@ -14,6 +14,7 @@ #include +class TGeoManager; class TGeoShape; class TGeoHMatrix; class TGeoCompositeShape; diff --git a/graf3d/eve7/inc/ROOT/REveManager.hxx b/graf3d/eve7/inc/ROOT/REveManager.hxx index a9a21258ab5c5..771d8923201b9 100644 --- a/graf3d/eve7/inc/ROOT/REveManager.hxx +++ b/graf3d/eve7/inc/ROOT/REveManager.hxx @@ -158,6 +158,9 @@ protected: REveServerStatus fServerStatus; bool fIsRCore{false}; + // restricted functionality for public use + bool fHttpPublic{false}; + void WindowConnect(unsigned connid); void WindowData(unsigned connid, const std::string &arg); void WindowDisconnect(unsigned connid); @@ -281,6 +284,9 @@ public: void GetServerStatus(REveServerStatus&); bool IsRCore() const { return fIsRCore; } + + bool GetHttpPublic() { return fHttpPublic;} + void SetHttpPublic(bool); }; R__EXTERN REveManager* gEve; diff --git a/graf3d/eve7/inc/ROOT/REveTypes.hxx b/graf3d/eve7/inc/ROOT/REveTypes.hxx index 5498da002ab1e..49ccf0e6ca004 100644 --- a/graf3d/eve7/inc/ROOT/REveTypes.hxx +++ b/graf3d/eve7/inc/ROOT/REveTypes.hxx @@ -13,13 +13,15 @@ #ifndef ROOT7_REveTypes #define ROOT7_REveTypes -#include "GuiTypes.h" // For Pixel_t only, to be changed. +#include "RtypesCore.h" -#include "TString.h" +#include +#include + +typedef ULong_t Pixel_t; // from GuiTypes.h + +class TString; -#include -#include -class TGeoManager; namespace ROOT { namespace Experimental { typedef unsigned int ElementId_t; @@ -42,10 +44,12 @@ class REveException : public std::exception { std::string fWhat; public: REveException() = default; - explicit REveException(const std::string &s) : fWhat(s) {} + explicit REveException(std::string_view s) : fWhat(s) {} ~REveException() noexcept override {} - void append(const std::string &s) { fWhat.append(s); } + void append(std::string_view s) { fWhat.append(s); } + operator const std::string&() const noexcept { return fWhat; } + const std::string &str() const noexcept { return fWhat; } const char *what() const noexcept override { return fWhat.c_str(); } }; @@ -54,6 +58,9 @@ REveException operator+(const REveException &s1, const TString &s2); REveException operator+(const REveException &s1, const char *s2); REveException operator+(const REveException &s1, ElementId_t x); +inline std::ostream& operator <<(std::ostream &s, const REveException &e) +{ s << e.what(); return s; } + /// Log channel for Eve diagnostics. 
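With the changes above, REveException is constructible and appendable from std::string_view and can be streamed directly into a log or std::ostream. A minimal sketch of the resulting usage, with the function and messages purely illustrative:

    #include <ROOT/REveTypes.hxx>
    #include <iostream>
    #include <string>

    using ROOT::Experimental::REveException;

    void check_name(const std::string &name)
    {
       static const REveException eh("check_name: ");

       if (name.empty())
          throw eh + "empty element name.";   // operator+(REveException, const char*)

       REveException diag(eh);
       diag.append(name);                     // append() now takes a std::string_view
       std::cout << diag << std::endl;        // streaming operator added above
    }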
RLogChannel &REveLog(); diff --git a/graf3d/eve7/inc/ROOT/REveUtil.hxx b/graf3d/eve7/inc/ROOT/REveUtil.hxx index 85f4c5aada0aa..60c21caf795f3 100644 --- a/graf3d/eve7/inc/ROOT/REveUtil.hxx +++ b/graf3d/eve7/inc/ROOT/REveUtil.hxx @@ -14,12 +14,11 @@ #include "REveTypes.hxx" -#include "TError.h" - +#include #include -#include -#include +class TObject; +class TObjArray; class TGeoManager; namespace ROOT { @@ -47,6 +46,10 @@ public: static void Macro(const char *mac); static void LoadMacro(const char *mac); + // Input string verification and sanitization + + static bool VerifyObjectFilterOrTableExpression(std::string_view expr); + // Color management static void ColorFromIdx(Color_t ci, UChar_t col[4], Bool_t alpha = kTRUE); diff --git a/graf3d/eve7/inc/ROOT/REveViewer.hxx b/graf3d/eve7/inc/ROOT/REveViewer.hxx index f9082d8c85c98..82d114c9b4b5f 100644 --- a/graf3d/eve7/inc/ROOT/REveViewer.hxx +++ b/graf3d/eve7/inc/ROOT/REveViewer.hxx @@ -108,15 +108,6 @@ public: // -------------------------------- - void OnMouseOver(TObject* obj, UInt_t state); - void OnReMouseOver(TObject* obj, UInt_t state); - void OnUnMouseOver(TObject* obj, UInt_t state); - void OnClicked(TObject *obj, UInt_t button, UInt_t state); - void OnReClicked(TObject *obj, UInt_t button, UInt_t state); - void OnUnClicked(TObject *obj, UInt_t button, UInt_t state); - - // -------------------------------- - Bool_t GetShowTooltip() const { return fShowTooltip; } void SetShowTooltip(Bool_t x) { fShowTooltip = x; } diff --git a/graf3d/eve7/src/REveBoxSet.cxx b/graf3d/eve7/src/REveBoxSet.cxx index 1b79f94087024..a97317d128681 100644 --- a/graf3d/eve7/src/REveBoxSet.cxx +++ b/graf3d/eve7/src/REveBoxSet.cxx @@ -314,7 +314,7 @@ void REveBoxSet::ComputeBBox() default: { - throw(eH + "unsupported box-type."); + throw eH + "unsupported box-type."; } } // end switch box-type @@ -354,7 +354,7 @@ Int_t REveBoxSet::WriteCoreJson(nlohmann::json &j, Int_t rnr_offset) N_tex = 4*N; break; default: - std::cout << "Ereor:: Unhandled instanced case\n"; + R__LOG_ERROR(REveLog()) << "REveBoxSet::WriteCoreJson Unhandled instancing type."; } REveRenderData::CalcTextureSize(N_tex, 4, fTexX, fTexY); @@ -367,7 +367,7 @@ Int_t REveBoxSet::WriteCoreJson(nlohmann::json &j, Int_t rnr_offset) j["defHeight"] = fDefHeight; j["defDepth"] = fDefDepth; - // std::cout << "TEXTURE SIZE X " << fTexX << "\n"; + // printf("TEXTURE SIZE X=%d, Y=%d\n", fTexX, fTexY); } // AMT:: the base class WroteCoreJson needs to be called after diff --git a/graf3d/eve7/src/REveDataCollection.cxx b/graf3d/eve7/src/REveDataCollection.cxx index 505bd4930d317..c0d841a0522c5 100644 --- a/graf3d/eve7/src/REveDataCollection.cxx +++ b/graf3d/eve7/src/REveDataCollection.cxx @@ -9,10 +9,10 @@ * For the list of contributors see $ROOTSYS/README/CREDITS. * *************************************************************************/ +#include #include -#include #include -#include +#include #include "TROOT.h" #include "TMethod.h" @@ -294,21 +294,22 @@ void REveDataCollection::SetFilterExpr(const char* filter) if (fFilterExpr.Length()) { + if ( ! 
REveUtil::VerifyObjectFilterOrTableExpression(fFilterExpr.Data())) { + throw eh + "filter-expression verification failed."; + } std::stringstream s; s << "*((std::functionGetName() << "*)>*)" << std::hex << std::showbase << (size_t)&fFilterFoo << ") = [](" << fItemClass->GetName() << "* p){" << fItemClass->GetName() << " &i=*p; return (" << fFilterExpr.Data() << "); };"; // printf("%s\n", s.Data()); - try { - gROOT->ProcessLine(s.str().c_str()); - // AMT I don't know why ApplyFilter call is separated - ApplyFilter(); - } - catch (const std::exception &exc) - { - R__LOG_ERROR(REveLog()) << "EveDataCollection::SetFilterExpr" << exc.what(); + int err; + gROOT->ProcessLine(s.str().c_str(), &err); + if (err != TInterpreter::kNoError) { + throw eh + "filter expression compilation failed."; } + // AMT I don't know why ApplyFilter call is separated + ApplyFilter(); } else { @@ -345,7 +346,7 @@ void REveDataCollection::ApplyFilter() ii->SetFiltered( ! res ); - ids.push_back(idx++); + ids.push_back(idx++); // all of them? SetFiltered() could return status-change } StampObjProps(); fItemList->StampObjProps(); diff --git a/graf3d/eve7/src/REveDataTable.cxx b/graf3d/eve7/src/REveDataTable.cxx index aaeeff0e9da39..1e2415d769348 100644 --- a/graf3d/eve7/src/REveDataTable.cxx +++ b/graf3d/eve7/src/REveDataTable.cxx @@ -143,10 +143,9 @@ std::string REveDataColumn::GetFunctionExpressionString() const std::stringstream s; s << " *((std::function<" << rtyp << "(" << fClassType->GetName() << "*)>*)" - << std::hex << std::showbase << (size_t)fooptr - << ") = [](" << fClassType->GetName() << "* p){" << fClassType->GetName() << " &i=*p; return (" << fExpression.Data() - << "); };"; - + << std::hex << std::showbase << (size_t)fooptr + << ") = [](" << fClassType->GetName() << "* p){" << fClassType->GetName() << " &i=*p; return (" << fExpression.Data() + << "); };"; return s.str(); } diff --git a/graf3d/eve7/src/REveElement.cxx b/graf3d/eve7/src/REveElement.cxx index 18504f57e7a47..92979d79daa6d 100644 --- a/graf3d/eve7/src/REveElement.cxx +++ b/graf3d/eve7/src/REveElement.cxx @@ -117,6 +117,44 @@ REveElement::~REveElement() } } +// MIR execution variables and helper functions + +thread_local REveElement *REveElement::stlMirAlpha = nullptr; +thread_local int REveElement::stlMirError = 0; +thread_local std::string REveElement::stlMirErrorString; + +void REveElement::ClearMirContext() { + stlMirAlpha = nullptr; + stlMirError = 0; + stlMirErrorString.clear(); +} + +void REveElement::SetMirContext(REveElement *el) { + stlMirAlpha = el; +} + +void REveElement::SetMirError(int error, std::string_view err_str) { + stlMirError = error; + if ( ! err_str.empty()) { + AppendMirErrorString(err_str); + } +} + +void REveElement::AppendMirErrorString(std::string_view err_str) { + if (stlMirErrorString.empty()) { + stlMirErrorString = err_str; + } else { + std::string s; + s.reserve(stlMirErrorString.size() + err_str.size() + 4); + s = err_str; + s += " :: "; + s += stlMirErrorString; + stlMirErrorString.swap(s); + } +} + +// Element IDs etc + ElementId_t REveElement::get_mother_id() const { return fMother ? fMother->GetElementId() : 0; diff --git a/graf3d/eve7/src/REveManager.cxx b/graf3d/eve7/src/REveManager.cxx index 14cd1f264808f..98395ea409776 100644 --- a/graf3d/eve7/src/REveManager.cxx +++ b/graf3d/eve7/src/REveManager.cxx @@ -159,6 +159,9 @@ REveManager::REveManager() // !!! 
AMT increase threshold to enable color pick on client TColor::SetColorThreshold(0.1); + // allow multiple connections + ROOT::RWebWindowsManager::SetSingleConnMode(false); + fWebWindow = ROOT::RWebWindow::Create(); fWebWindow->UseServerThreads(); fWebWindow->SetDefaultPage("file:rootui5sys/eve7/index.html"); @@ -873,8 +876,14 @@ void REveManager::WindowData(unsigned connid, const std::string &arg) } else if (ROOT::RWebWindow::IsFileDialogMessage(arg)) { - // file dialog - ROOT::RWebWindow::EmbedFileDialog(fWebWindow, connid, arg); + if (fHttpPublic) + { + R__LOG_INFO(REveLog()) << "REveManager::WindowData, file dialog is not allowed in restriced public mode"; + } + else + { + ROOT::RWebWindow::EmbedFileDialog(fWebWindow, connid, arg); + } return; } else if (arg.compare(0, 11, "SETCHANNEL:") == 0) { @@ -920,11 +929,13 @@ void REveManager::ScheduleMIR(const std::string &cmd, ElementId_t id, const std: //____________________________________________________________________ void REveManager::ExecuteMIR(std::shared_ptr mir) { - static const REveException eh("REveManager::ExecuteMIR "); + static const REveException eh(""); // Empty -- all errors go to R__LOG //if (gDebug > 0) ::Info("REveManager::ExecuteCommand", "MIR cmd %s", mir->fCmd.c_str()); + std::string tag = mir->fCtype + "::"; + try { REveElement *el = FindElementById(mir->fId); if ( ! el) throw eh + "Element with id " + mir->fId + " not found"; @@ -945,9 +956,9 @@ void REveManager::ExecuteMIR(std::shared_ptr mir) if ( ! el_casted) throw eh + "Dynamic cast from REveElement to '" + mir->fCtype + "' failed."; - std::string tag(mir->fCtype + "::" + m.str(1)); - std::shared_ptr mc; + tag = mir->fCtype + "::" + m.str(1); + std::shared_ptr mc; auto mmi = fMethCallMap.find(tag); if (mmi != fMethCallMap.end()) { @@ -962,9 +973,16 @@ void REveManager::ExecuteMIR(std::shared_ptr mir) fMethCallMap.insert(std::make_pair(tag, mc)); } + REveElement::SetMirContext(el); + R__LOCKGUARD_CLING(gInterpreterMutex); mc->Execute(el_casted, m.str(2).c_str()); + if (REveElement::stlMirError) { + R__LOG_ERROR(REveLog()) << eh << "error executing " << tag << ":" << REveElement::stlMirErrorString << " (code " << REveElement::stlMirError << ")."; + } + REveElement::ClearMirContext(); + // Alternative implementation through Cling. "Leaks" 200 kB per call. // This might be needed for function calls that involve data-types TMethodCall // can not handle. @@ -973,9 +991,9 @@ void REveManager::ExecuteMIR(std::shared_ptr mir) // std::cout << cmd.str() << std::endl; // gROOT->ProcessLine(cmd.str().c_str()); } catch (std::exception &e) { - R__LOG_ERROR(REveLog()) << "REveManager::ExecuteCommand " << e.what() << std::endl; + R__LOG_ERROR(REveLog()) << "caught exception executing " << tag << ": " << e.what(); } catch (...) 
{ - R__LOG_ERROR(REveLog()) << "REveManager::ExecuteCommand unknow execption \n"; + R__LOG_ERROR(REveLog()) << "caught unknown execption."; } } @@ -1019,6 +1037,7 @@ void REveManager::SendSceneChanges() jobj["log"] = nlohmann::json::array(); std::stringstream strm; for (auto entry : gEveLogEntries) { + strm.str(""); nlohmann::json item = {}; item["lvl"] = entry.fLevel; int cappedLevel = std::min(static_cast(entry.fLevel), numLevels - 1); @@ -1029,11 +1048,9 @@ void REveManager::SendSceneChanges() strm << " " << entry.fMessage; item["msg"] = strm.str(); jobj["log"].push_back(item); - strm.clear(); } gEveLogEntries.clear(); } - fWebWindow->Send(0, jobj.dump()); } @@ -1240,7 +1257,7 @@ REveManager::ChangeGuard::~ChangeGuard() gEve->EndChange(); } -// Error handler streams error-level messages to client log +// gInterpreter error handlder void REveManager::ErrorHandler(Int_t level, Bool_t abort, const char * location, const char *msg) { if (level >= kError) @@ -1275,9 +1292,20 @@ TStdExceptionHandler::EStatus REveManager::RExceptionHandler::Handle(std::except //////////////////////////////////////////////////////////////////////////////// /// Utility to stream loggs to client. +/// Return false to supress further emission bool REveManager::Logger::Handler::Emit(const RLogEntry &entry) { gEveLogEntries.emplace_back(entry); - return true; + return false; } + + +//////////////////////////////////////////////////////////////////////////////// +/// Restrict functionality for this server when open to public + +void REveManager::SetHttpPublic(bool x) +{ + R__LOG_INFO(REveLog()) << "Set public mode to " << x <<"."; + fHttpPublic = x; +} \ No newline at end of file diff --git a/graf3d/eve7/src/REveProjections.cxx b/graf3d/eve7/src/REveProjections.cxx index 7e340404dc2d2..83fd85a989403 100644 --- a/graf3d/eve7/src/REveProjections.cxx +++ b/graf3d/eve7/src/REveProjections.cxx @@ -9,12 +9,13 @@ * For the list of contributors see $ROOTSYS/README/CREDITS. * *************************************************************************/ -#include "TError.h" - #include #include #include +#include "TError.h" +#include "TString.h" + #include using namespace ROOT::Experimental; diff --git a/graf3d/eve7/src/REveTableInfo.cxx b/graf3d/eve7/src/REveTableInfo.cxx index 9a4caf4d1efc3..91f84e7686723 100644 --- a/graf3d/eve7/src/REveTableInfo.cxx +++ b/graf3d/eve7/src/REveTableInfo.cxx @@ -16,8 +16,9 @@ #include "TMethod.h" #include "TMethodArg.h" -#include #include +#include +#include #include @@ -95,6 +96,8 @@ void REveTableViewInfo::SetDisplayedCollection(ElementId_t collectionId) void REveTableViewInfo::AddNewColumnToCurrentCollection(const char* expr, const char* title, int prec) { + static const REveException eh("REveTableViewInfo::AddNewColumnToCurrentCollection"); + if (!fDisplayedCollection) return; @@ -104,21 +107,24 @@ void REveTableViewInfo::AddNewColumnToCurrentCollection(const char* expr, const return; } + if ( ! 
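The SendSceneChanges() hunk above swaps strm.clear() for strm.str("") inside the log-entry loop, and that is the actual fix: clear() only resets the stream's state flags, it does not empty the character buffer, so successive log entries were being concatenated. A tiny self-contained illustration:

```cpp
#include <cassert>
#include <sstream>

int main()
{
   std::stringstream strm;
   strm << "first";
   strm.clear();                    // flags only; the buffer still holds "first"
   assert(strm.str() == "first");

   strm.str("");                    // actually empties the buffer, as the patch now does
   strm << "second";
   assert(strm.str() == "second");
}
```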
REveUtil::VerifyObjectFilterOrTableExpression(expr)) { + throw eh + "column-expression verification failed."; + } + const char *rtyp = "void"; auto icls = col->GetItemClass(); std::function fooptr; std::stringstream s; s << "*((std::function<" << rtyp << "(" << icls->GetName() << "*)>*)" << std::hex << std::showbase - << (size_t)(&fooptr) << ") = [](" << icls->GetName() << "* p){" << icls->GetName() << " &i=*p; return (" << expr - << "); }"; + << (size_t)(&fooptr) << ") = [](" << icls->GetName() << "* p){" << icls->GetName() + << " &i=*p; return (" << expr << "); }"; // make ProcessLine() call to check if expr is valid // there may be more efficient check int err; gROOT->ProcessLine(s.str().c_str(), &err); if (err != TInterpreter::kNoError) { - std::cout << "REveTableViewInfo::AddNewColumnToCurrentCollection failed." << std::endl; - return; + throw eh + "column expression compilation check failed."; } fConfigChanged = true; diff --git a/graf3d/eve7/src/REveTypes.cxx b/graf3d/eve7/src/REveTypes.cxx index 6ef843a85aae1..13fb5666d6be5 100644 --- a/graf3d/eve7/src/REveTypes.cxx +++ b/graf3d/eve7/src/REveTypes.cxx @@ -11,7 +11,7 @@ #include #include -#include +#include "TString.h" using namespace ROOT::Experimental; namespace REX = ROOT::Experimental; @@ -53,4 +53,3 @@ REX::RLogChannel &REX::REveLog() static RLogChannel sLog("ROOT.Eve"); return sLog; } - diff --git a/graf3d/eve7/src/REveUtil.cxx b/graf3d/eve7/src/REveUtil.cxx index 4b151e702b82f..2e6c778b8e1be 100644 --- a/graf3d/eve7/src/REveUtil.cxx +++ b/graf3d/eve7/src/REveUtil.cxx @@ -9,9 +9,9 @@ * For the list of contributors see $ROOTSYS/README/CREDITS. * *************************************************************************/ -#include -#include #include +#include +#include #include @@ -29,6 +29,7 @@ #include #include #include +#include using namespace ROOT::Experimental; namespace REX = ROOT::Experimental; @@ -110,6 +111,25 @@ void REveUtil::LoadMacro(const char* mac) } } +//////////////////////////////////////////////////////////////////////////////// +/// Input string verification and sanitization + +bool REveUtil::VerifyObjectFilterOrTableExpression(std::string_view expr) +{ + static const std::regex bad_re("[^\\w](?:gSystem|gROOT)[^\\w]", std::regex::optimize); + static const std::regex public_extra_re("(?:\\|\")", std::regex::optimize); + + auto beg = expr.cbegin(), end = expr.cend(); + if (std::regex_search(beg, end, bad_re)) + return false; + + const bool is_public = true; // to come from gEve + if (is_public && std::regex_search(beg, end, public_extra_re)) + return false; + + return true; +} + //////////////////////////////////////////////////////////////////////////////// /// Fill col with RGBA values corresponding to index ci. If alpha /// is true, set alpha component of col to 255. diff --git a/graf3d/eve7/src/REveViewer.cxx b/graf3d/eve7/src/REveViewer.cxx index 69964b96110d8..f672ff1fdc00c 100644 --- a/graf3d/eve7/src/REveViewer.cxx +++ b/graf3d/eve7/src/REveViewer.cxx @@ -16,6 +16,7 @@ #include #include #include +#include #include @@ -106,6 +107,10 @@ List of Viewers providing common operations on REveViewer collections. 
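The new REveUtil::VerifyObjectFilterOrTableExpression() above rejects expressions mentioning gROOT or gSystem (plus an extra pattern in public mode) before they ever reach ProcessLine(). A standalone illustration using the same gROOT/gSystem regex; the public-mode part is simplified here to a plain character scan, so treat that detail as an assumption rather than the patch's exact behaviour:

```cpp
#include <cstdio>
#include <regex>
#include <string>

static bool LooksSafe(const std::string &expr, bool is_public)
{
   static const std::regex bad_re("[^\\w](?:gSystem|gROOT)[^\\w]", std::regex::optimize);
   if (std::regex_search(expr, bad_re))
      return false;
   // Simplified stand-in for the public-mode pattern: refuse pipes and quotes.
   if (is_public && expr.find_first_of("|\"") != std::string::npos)
      return false;
   return true;
}

int main()
{
   std::printf("%d\n", LooksSafe("i.pt() > 10 && std::abs(i.eta()) < 2.4", true)); // 1
   std::printf("%d\n", LooksSafe(" gROOT->Reset() ", false));                      // 0
}
```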
void REveViewer::SetAxesType(int at) { fAxesType = (EAxesType)at; + if (fAxesType != kAxesNone) { + std::string rf_dir = gSystem->ExpandPathName("${ROOTSYS}/fonts/"); + REX::REveText::AssertSdfFont("LiberationSerif-Regular", rf_dir + "LiberationSerif-Regular.ttf"); + } StampObjProps(); } @@ -357,126 +362,6 @@ void REveViewerList::HandleTooltip() } } -//////////////////////////////////////////////////////////////////////////////// -/// Slot for global TGLViewer::MouseOver() signal. -/// -/// The attempt is made to determine the REveElement being -/// represented by the physical shape and global highlight is updated -/// accordingly. -/// -/// If REveElement::IsPickable() returns false, the element is not -/// highlighted. -/// -/// Highlight is always in single-selection mode. - -void REveViewerList::OnMouseOver(TObject *, UInt_t /*state*/) -{ - // REveElement *el = dynamic_cast(obj); - // if (el && !el->IsPickable()) - // el = nullptr; - - // void *qsender = gTQSender; - // REX::gEve->GetHighlight()->UserPickedElement(el, kFALSE); - // gTQSender = qsender; - - HandleTooltip(); -} - -//////////////////////////////////////////////////////////////////////////////// -/// Slot for global TGLViewer::ReMouseOver(). -/// -/// The obj is dyn-casted to the REveElement and global selection is -/// updated accordingly. -/// -/// If REveElement::IsPickable() returns false, the element is not -/// selected. - -void REveViewerList::OnReMouseOver(TObject *obj, UInt_t /*state*/) -{ - REveElement* el = dynamic_cast(obj); - if (el && !el->IsPickable()) - el = nullptr; - - // void *qsender = gTQSender; - // REX::gEve->GetHighlight()->UserRePickedElement(el); - // gTQSender = qsender; - - HandleTooltip(); -} - -//////////////////////////////////////////////////////////////////////////////// -/// Slot for global TGLViewer::UnMouseOver(). -/// -/// The obj is dyn-casted to the REveElement and global selection is -/// updated accordingly. -/// -/// If REveElement::IsPickable() returns false, the element is not -/// selected. - -void REveViewerList::OnUnMouseOver(TObject *obj, UInt_t /*state*/) -{ - REveElement* el = dynamic_cast(obj); - if (el && !el->IsPickable()) - el = nullptr; - - // void *qsender = gTQSender; - // REX::gEve->GetHighlight()->UserUnPickedElement(el); - // gTQSender = qsender; - - HandleTooltip(); -} - -//////////////////////////////////////////////////////////////////////////////// -/// Slot for global TGLViewer::Clicked(). -/// -/// The obj is dyn-casted to the REveElement and global selection is -/// updated accordingly. -/// -/// If REveElement::IsPickable() returns false, the element is not -/// selected. - -void REveViewerList::OnClicked(TObject *obj, UInt_t /*button*/, UInt_t state) -{ - REveElement* el = dynamic_cast(obj); - if (el && !el->IsPickable()) - el = nullptr; - REX::gEve->GetSelection()->UserPickedElement(el, state & kKeyControlMask); -} - -//////////////////////////////////////////////////////////////////////////////// -/// Slot for global TGLViewer::ReClicked(). -/// -/// The obj is dyn-casted to the REveElement and global selection is -/// updated accordingly. -/// -/// If REveElement::IsPickable() returns false, the element is not -/// selected. 
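Small remark on the SetAxesType() hunk above: if I read the TSystem contract correctly, the const char* overload of ExpandPathName() returns a newly allocated string the caller is expected to delete, so wrapping it directly in a std::string leaks a few bytes per call. A leak-free sketch using the in-place TString overload; this is a suggestion only and assumes both overloads behave as documented:

```cpp
#include <string>
#include "TString.h"
#include "TSystem.h"

std::string FontDir()
{
   TString dir("${ROOTSYS}/fonts/");
   gSystem->ExpandPathName(dir);        // expands in place, nothing to free
   return std::string(dir.Data());
}
```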
- -void REveViewerList::OnReClicked(TObject *obj, UInt_t /*button*/, UInt_t /*state*/) -{ - REveElement* el = dynamic_cast(obj); - if (el && !el->IsPickable()) - el = nullptr; - REX::gEve->GetSelection()->UserRePickedElement(el); -} - -//////////////////////////////////////////////////////////////////////////////// -/// Slot for global TGLViewer::UnClicked(). -/// -/// The obj is dyn-casted to the REveElement and global selection is -/// updated accordingly. -/// -/// If REveElement::IsPickable() returns false, the element is not -/// selected. - -void REveViewerList::OnUnClicked(TObject *obj, UInt_t /*button*/, UInt_t /*state*/) -{ - REveElement* el = dynamic_cast(obj); - if (el && !el->IsPickable()) - el = nullptr; - REX::gEve->GetSelection()->UserUnPickedElement(el); -} - //////////////////////////////////////////////////////////////////////////////// /// Set color brightness. @@ -491,13 +376,14 @@ void REveViewerList::SetColorBrightness(Float_t b) void REveViewerList::SwitchColorSet() { fUseLightColorSet = ! fUseLightColorSet; + // To implement something along the lines of: + // BeginChanges on EveWorld; // Here or in the calling function // for (auto &c: fChildren) { - // TGLViewer* glv = ((REveViewer *)c)->GetGLViewer(); + // REveViewer* v = (REveViewer *)c; // if ( fUseLightColorSet) - // glv->UseLightColorSet(); + // c->UseLightColorSet(); // else - // glv->UseDarkColorSet(); - - // glv->RequestDraw(TGLRnrCtx::kLODHigh); + // c->UseDarkColorSet(); // } + // EndChanges on EveWorld; } diff --git a/graf3d/gl/src/TGLSdfFontMakerLowLevel.icxx b/graf3d/gl/src/TGLSdfFontMakerLowLevel.icxx index 90900d1c23845..ef2bd41682d44 100644 --- a/graf3d/gl/src/TGLSdfFontMakerLowLevel.icxx +++ b/graf3d/gl/src/TGLSdfFontMakerLowLevel.icxx @@ -2144,10 +2144,15 @@ bool Font::load_ttf_file( const char *filename ) { fseek( f, 0, SEEK_SET ); uint8_t *ttf = (unsigned char*) malloc( fsize ); - fread( ttf, 1, fsize, f ); + size_t result = fread( ttf, 1, fsize, f ); + bool res = false; + if (result != fsize) { + perror("Error reading file"); + } else { + res = load_ttf_mem( ttf ); + } fclose( f ); - bool res = load_ttf_mem( ttf ); free( ttf ); return res; } diff --git a/gui/gui/src/TGNumberEntry.cxx b/gui/gui/src/TGNumberEntry.cxx index 56d39d00fc709..1f56c2a14de43 100644 --- a/gui/gui/src/TGNumberEntry.cxx +++ b/gui/gui/src/TGNumberEntry.cxx @@ -305,13 +305,13 @@ static char *RealToStr(char *text, const RealInfo_t & ri) StrInt(p, TMath::Abs(ri.fIntNum), 0); p += strlen(p); if ((ri.fStyle == kRSFrac) || (ri.fStyle == kRSFracExpo)) { - strlcpy(p, ".", 256-strlen(p)); + strlcpy(p, ".", 256-strlen(text)); p++; StrInt(p, TMath::Abs(ri.fFracNum), ri.fFracDigits); p += strlen(p); } if ((ri.fStyle == kRSExpo) || (ri.fStyle == kRSFracExpo)) { - strlcpy(p, "e", 256-strlen(p)); + strlcpy(p, "e", 256-strlen(text)); p++; StrInt(p, ri.fExpoNum, 0); p += strlen(p); diff --git a/gui/gui/src/TRootGuiFactory.cxx b/gui/gui/src/TRootGuiFactory.cxx index f80ac052ac204..cfdde8d2e5968 100644 --- a/gui/gui/src/TRootGuiFactory.cxx +++ b/gui/gui/src/TRootGuiFactory.cxx @@ -78,7 +78,7 @@ void TRootGuiFactory::ShowWebCanvasWarning() TCanvasImp *TRootGuiFactory::CreateCanvasImp(TCanvas *c, const char *title, UInt_t width, UInt_t height) { - TString canvName = gEnv->GetValue("Canvas.Name", "TWebCanvas"); + TString canvName = gEnv->GetValue("Canvas.Name", "TRootCanvas"); if (canvName == "TWebCanvas") { auto ph = gROOT->GetPluginManager()->FindHandler("TCanvasImp", "TWebCanvas"); @@ -98,7 +98,7 @@ TCanvasImp 
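On the TGNumberEntry size-argument fix above: p always points at the terminating NUL of the text already written, so strlen(p) is 0 and the old "256 - strlen(p)" told strlcpy the whole 256-byte buffer was still free. The space actually remaining is 256 minus what the buffer already holds, i.e. 256 - strlen(text). A compact demonstration, with a portable strlcpy stand-in since not every libc provides one:

```cpp
#include <cstdio>
#include <cstring>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
   size_t len = std::strlen(src);
   if (size) {
      size_t n = (len >= size) ? size - 1 : len;
      std::memcpy(dst, src, n);
      dst[n] = '\0';
   }
   return len;
}

int main()
{
   char text[256] = "12345";
   char *p = text + std::strlen(text);              // append position
   my_strlcpy(p, ".", 256 - std::strlen(text));     // correct remaining capacity
   std::printf("%s\n", text);                       // prints "12345."
}
```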
*TRootGuiFactory::CreateCanvasImp(TCanvas *c, const char *title, TCanvasImp *TRootGuiFactory::CreateCanvasImp(TCanvas *c, const char *title, Int_t x, Int_t y, UInt_t width, UInt_t height) { - TString canvName = gEnv->GetValue("Canvas.Name", "TWebCanvas"); + TString canvName = gEnv->GetValue("Canvas.Name", "TRootCanvas"); if (canvName == "TWebCanvas") { auto ph = gROOT->GetPluginManager()->FindHandler("TCanvasImp", "TWebCanvas"); diff --git a/gui/webdisplay/inc/ROOT/RWebWindowsManager.hxx b/gui/webdisplay/inc/ROOT/RWebWindowsManager.hxx index d1f285f17a6d8..9e8f95247b06c 100644 --- a/gui/webdisplay/inc/ROOT/RWebWindowsManager.hxx +++ b/gui/webdisplay/inc/ROOT/RWebWindowsManager.hxx @@ -111,6 +111,7 @@ public: static void SetUseSessionKey(bool on = true); static void SetUseConnectionKey(bool on = true); + static void SetSingleConnMode(bool on = true); static void AddServerLocation(const std::string &server_prefix, const std::string &files_path); static std::map GetServerLocations(); diff --git a/gui/webdisplay/src/RWebDisplayHandle.cxx b/gui/webdisplay/src/RWebDisplayHandle.cxx index bf6ee7535cd6b..4122735a7dc8f 100644 --- a/gui/webdisplay/src/RWebDisplayHandle.cxx +++ b/gui/webdisplay/src/RWebDisplayHandle.cxx @@ -331,8 +331,16 @@ RWebDisplayHandle::BrowserCreator::Display(const RWebDisplayArgs &args) R__LOG_DEBUG(0, WebGUILog()) << "Show web window in browser with posix_spawn:\n" << fProg << " " << exec; + posix_spawn_file_actions_t action; + posix_spawn_file_actions_init(&action); + posix_spawn_file_actions_addopen (&action, STDOUT_FILENO, "/dev/null", O_WRONLY|O_APPEND, 0); + posix_spawn_file_actions_addopen (&action, STDERR_FILENO, "/dev/null", O_WRONLY|O_APPEND, 0); + pid_t pid; - int status = posix_spawn(&pid, argv[0], nullptr, nullptr, argv.data(), nullptr); + int status = posix_spawn(&pid, argv[0], &action, nullptr, argv.data(), nullptr); + + posix_spawn_file_actions_destroy(&action); + if (status != 0) { if (!tmpfile.empty()) gSystem->Unlink(tmpfile.c_str()); @@ -511,11 +519,11 @@ RWebDisplayHandle::ChromeCreator::ChromeCreator(bool _edge) : BrowserCreator(tru if (use_normal) { // old browser with standard headless mode fBatchExec = gEnv->GetValue((fEnvPrefix + "Batch").c_str(), "$prog --headless --no-sandbox --disable-extensions --disable-audio-output $geometry --dump-dom $url 2>/dev/null"); - fHeadlessExec = gEnv->GetValue((fEnvPrefix + "Headless").c_str(), "$prog --headless --no-sandbox --disable-extensions --disable-audio-output $geometry \'$url\' >/dev/null 2>/dev/null &"); + fHeadlessExec = gEnv->GetValue((fEnvPrefix + "Headless").c_str(), "fork:--headless --no-sandbox --disable-extensions --disable-audio-output $geometry $url"); } else { // newer version with headless=new mode fBatchExec = gEnv->GetValue((fEnvPrefix + "Batch").c_str(), "$prog --headless=new --no-sandbox --disable-extensions --disable-audio-output $geometry --dump-dom $url 2>/dev/null"); - fHeadlessExec = gEnv->GetValue((fEnvPrefix + "Headless").c_str(), "$prog --headless=new --no-sandbox --disable-extensions --disable-audio-output $geometry \'$url\' >/dev/null 2>/dev/null &"); + fHeadlessExec = gEnv->GetValue((fEnvPrefix + "Headless").c_str(), "fork:--headless=new --no-sandbox --disable-extensions --disable-audio-output $geometry $url"); } fExec = gEnv->GetValue((fEnvPrefix + "Interactive").c_str(), "$prog $geometry --new-window --app=\'$url\' >/dev/null 2>/dev/null &"); #endif diff --git a/gui/webdisplay/src/RWebWindow.cxx b/gui/webdisplay/src/RWebWindow.cxx index 5dd522d3b019c..3476019b4e929 100644 --- 
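The RWebDisplayHandle change above attaches posix_spawn file actions so the spawned browser's stdout/stderr go to /dev/null instead of the parent terminal. A self-contained sketch of that mechanism; the program and arguments are placeholders, not the actual browser command line assembled by RWebDisplayHandle:

```cpp
#include <cstdio>
#include <fcntl.h>
#include <spawn.h>
#include <sys/wait.h>
#include <unistd.h>

extern char **environ;

int main()
{
   char arg0[] = "/bin/echo", arg1[] = "hello";
   char *argv[] = {arg0, arg1, nullptr};

   posix_spawn_file_actions_t action;
   posix_spawn_file_actions_init(&action);
   posix_spawn_file_actions_addopen(&action, STDOUT_FILENO, "/dev/null", O_WRONLY | O_APPEND, 0);
   posix_spawn_file_actions_addopen(&action, STDERR_FILENO, "/dev/null", O_WRONLY | O_APPEND, 0);

   pid_t pid = 0;
   int status = posix_spawn(&pid, argv[0], &action, nullptr, argv, environ);
   posix_spawn_file_actions_destroy(&action);

   if (status == 0)
      waitpid(pid, &status, 0);        // reap the child
   else
      std::perror("posix_spawn");
}
```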
a/gui/webdisplay/src/RWebWindow.cxx +++ b/gui/webdisplay/src/RWebWindow.cxx @@ -690,12 +690,18 @@ void RWebWindow::CheckInactiveConnections() /// Configure maximal number of allowed connections - 0 is unlimited /// Will not affect already existing connections /// Default is 1 - the only client is allowed +/// Because of security reasons setting number of allowed connections is not sufficient now. +/// To enable multi-connection mode, one also has to call +/// `ROOT::RWebWindowsManager::SetSingleConnMode(false);` +/// before creating of the RWebWindow instance void RWebWindow::SetConnLimit(unsigned lmt) { + bool single_conn_mode = RWebWindowWSHandler::GetBoolEnv("WebGui.SingleConnMode", 1) == 1; + std::lock_guard grd(fConnMutex); - fConnLimit = lmt; + fConnLimit = single_conn_mode ? 1 : lmt; } ///////////////////////////////////////////////////////////////////////// diff --git a/gui/webdisplay/src/RWebWindowsManager.cxx b/gui/webdisplay/src/RWebWindowsManager.cxx index 098a9cb3ea6e6..e01383289e8eb 100644 --- a/gui/webdisplay/src/RWebWindowsManager.cxx +++ b/gui/webdisplay/src/RWebWindowsManager.cxx @@ -170,6 +170,18 @@ void RWebWindowsManager::SetUseConnectionKey(bool on) gEnv->SetValue("WebGui.OnetimeKey", on ? "yes" : "no"); } +////////////////////////////////////////////////////////////////////////////////////////// +/// Enable or disable single connection mode (default on) +/// If enabled, one connection only with any web widget is possible +/// Any attempt to establish more connections will fail +/// if this mode is disabled some widgets like geom viewer or web canvas will be able to +/// to serve several clients - only when they are connected with required authentication keys + +void RWebWindowsManager::SetSingleConnMode(bool on) +{ + gEnv->SetValue("WebGui.SingleConnMode", on ? 
"yes" : "no"); +} + ////////////////////////////////////////////////////////////////////////////////////////// /// Configure server location which can be used for loading of custom scripts or files /// When THttpServer instance of RWebWindowsManager will be created, @@ -752,6 +764,7 @@ std::string RWebWindowsManager::GetUrl(RWebWindow &win, bool remote, std::string /// /// WebGui.Display: kind of display like chrome or firefox or browser, can be overwritten by --web=value command line argument /// WebGui.OnetimeKey: if configured requires unique key every time window is connected (default yes) +/// WebGui.SingleConnMode: if configured the only connection and the only user of any widget is possible (default yes) /// WebGui.Chrome: full path to Google Chrome executable /// WebGui.ChromeBatch: command to start chrome in batch, used for image production, like "$prog --headless --disable-gpu $geometry $url" /// WebGui.ChromeHeadless: command to start chrome in headless mode, like "fork: --headless --disable-gpu $geometry $url" @@ -838,9 +851,14 @@ unsigned RWebWindowsManager::ShowWindow(RWebWindow &win, const RWebDisplayArgs & if (!args.IsHeadless() && normal_http) { auto winurl = args.GetUrl(); winurl.erase(0, fAddr.length()); - InformListener(std::string("win:") + winurl); + InformListener(std::string("win:") + winurl + "\n"); } + auto server = GetServer(); + + if (win.IsUseCurrentDir() && server) + server->AddLocation("currentdir/", "."); + if (!args.IsHeadless() && ((args.GetBrowserKind() == RWebDisplayArgs::kServer) || gROOT->IsWebDisplayBatch()) /*&& (RWebWindowWSHandler::GetBoolEnv("WebGui.OnetimeKey") != 1)*/) { std::cout << "New web window: " << args.GetUrl() << std::endl; return 0; @@ -868,11 +886,6 @@ unsigned RWebWindowsManager::ShowWindow(RWebWindow &win, const RWebDisplayArgs & } #endif - auto server = GetServer(); - - if (win.IsUseCurrentDir()) - server->AddLocation("currentdir/", "."); - if (!normal_http) args.SetHttpServer(server); diff --git a/gui/webdisplay/test/CMakeLists.txt b/gui/webdisplay/test/CMakeLists.txt index 21957931ed103..435b4a138ff3e 100644 --- a/gui/webdisplay/test/CMakeLists.txt +++ b/gui/webdisplay/test/CMakeLists.txt @@ -10,6 +10,7 @@ # test only can be run if Firefox or Chrome are detected on the system +if(NOT APPLE) if (CHROME_EXECUTABLE OR FIREFOX_EXECUTABLE) ROOT_ADD_TEST(test-webgui-ping RUN_SERIAL @@ -18,3 +19,4 @@ if (CHROME_EXECUTABLE OR FIREFOX_EXECUTABLE) PASSREGEX "PING-PONG TEST COMPLETED" TIMEOUT 300) endif() +endif() diff --git a/hist/hist/inc/TH2Poly.h b/hist/hist/inc/TH2Poly.h index d35c0e00686ef..440ca52411fc1 100644 --- a/hist/hist/inc/TH2Poly.h +++ b/hist/hist/inc/TH2Poly.h @@ -81,7 +81,6 @@ class TH2Poly : public TH2 { Bool_t Add(const TH1 *h1, const TH1 *h2, Double_t c1=1, Double_t c2=1) override; Bool_t Add(TF1 *h1, Double_t c1=1, Option_t *option="") override; void ClearBinContents(); // Clears the content of all bins - TObject *Clone(const char* newname = "") const override; void Copy(TObject & newth2p) const override; void ChangePartition(Int_t n, Int_t m); // Sets the number of partition cells to another value using TH2::Multiply; diff --git a/hist/hist/src/TH1.cxx b/hist/hist/src/TH1.cxx index 983736ff7c3d4..0256454b9c8b6 100644 --- a/hist/hist/src/TH1.cxx +++ b/hist/hist/src/TH1.cxx @@ -4635,13 +4635,34 @@ Int_t TH1::GetQuantiles(Int_t n, Double_t *xp, const Double_t *p) for (i = 0; i < nq; i++) { ibin = TMath::BinarySearch(nbins,fIntegral,prob[i]); - while (ibin < nbins-1 && fIntegral[ibin+1] == prob[i]) { - if 
(fIntegral[ibin+2] == prob[i]) ibin++; - else break; + if (fIntegral[ibin] == prob[i]) { + if (prob[i] == 0.) { + for (; ibin+1 <= nbins && fIntegral[ibin+1] == 0.; ++ibin) { + + } + xp[i] = fXaxis.GetBinUpEdge(ibin); + } + else if (prob[i] == 1.) { + xp[i] = fXaxis.GetBinUpEdge(ibin); + } + else { + // Find equal integral in later bins (ie their entries are zero) + Double_t width = 0; + for (Int_t j = ibin+1; j <= nbins; ++j) { + if (prob[i] == fIntegral[j]) { + width += fXaxis.GetBinWidth(j); + } + else + break; + } + xp[i] = width == 0 ? fXaxis.GetBinCenter(ibin) : fXaxis.GetBinUpEdge(ibin) + width/2.; + } + } + else { + xp[i] = GetBinLowEdge(ibin+1); + const Double_t dint = fIntegral[ibin+1]-fIntegral[ibin]; + if (dint > 0) xp[i] += GetBinWidth(ibin+1)*(prob[i]-fIntegral[ibin])/dint; } - xp[i] = GetBinLowEdge(ibin+1); - const Double_t dint = fIntegral[ibin+1]-fIntegral[ibin]; - if (dint > 0) xp[i] += GetBinWidth(ibin+1)*(prob[i]-fIntegral[ibin])/dint; } if (!p) delete [] prob; diff --git a/hist/hist/src/TH2Poly.cxx b/hist/hist/src/TH2Poly.cxx index af55b709b1f79..3d5d8937c8272 100644 --- a/hist/hist/src/TH2Poly.cxx +++ b/hist/hist/src/TH2Poly.cxx @@ -547,19 +547,6 @@ void TH2Poly::ChangePartition(Int_t n, Int_t m) } } -//////////////////////////////////////////////////////////////////////////////// -/// Make a complete copy of the underlying object. If 'newname' is set, -/// the copy's name will be set to that name. - -TObject* TH2Poly::Clone(const char* newname) const -{ - // TH1::Clone relies on ::Copy to implemented by the derived class. - // Until this is implemented, revert to the much slower default version - // (and possibly non-thread safe). - - return TNamed::Clone(newname); -} - //////////////////////////////////////////////////////////////////////////////// /// Clears the contents of all bins in the histogram. diff --git a/hist/hist/test/test_MapCppName.cxx b/hist/hist/test/test_MapCppName.cxx index 4985bfa18ef5c..c99b450201376 100644 --- a/hist/hist/test/test_MapCppName.cxx +++ b/hist/hist/test/test_MapCppName.cxx @@ -45,7 +45,7 @@ TEST(TH1, MapCppNameTest) if (!gSystem->GetPathInfo(CFile.Data(), fs)) FileSize = (Int_t)fs.fSize; - EXPECT_NEAR(FileSize, 5950, 200); + EXPECT_NEAR(FileSize, 5965, 200); gSystem->Unlink(CFile.Data()); } diff --git a/interpreter/cling/tools/plugins/clad/CMakeLists.txt b/interpreter/cling/tools/plugins/clad/CMakeLists.txt index ba5f09786caef..3c70d00a10f3b 100644 --- a/interpreter/cling/tools/plugins/clad/CMakeLists.txt +++ b/interpreter/cling/tools/plugins/clad/CMakeLists.txt @@ -68,8 +68,10 @@ set(_clad_extra_settings # If the CLAD_SOURCE_DIR variable is defined in the CMake configuration, we're # skipping the download of the repository and use the passed directory. 
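To see what the reworked GetQuantiles() branch above does in practice: when the requested probability falls exactly on a run of bins with equal cumulative integral (i.e. empty bins), the quantile is now reported at the centre of that run instead of at its far edge. A small macro illustrating this with made-up data:

```cpp
#include <cstdio>
#include "TH1D.h"

void quantile_demo()
{
   TH1D h("h", "demo", 10, 0., 10.);
   for (int i = 0; i < 50; ++i) h.Fill(1.5);   // everything below the gap
   for (int i = 0; i < 50; ++i) h.Fill(8.5);   // everything above the gap

   double prob[1] = {0.5}, xq[1] = {0.};
   h.GetQuantiles(1, xq, prob);
   std::printf("median estimate: %g\n", xq[0]);   // now inside the empty [2,8] stretch
}
```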
if (DEFINED CLAD_SOURCE_DIR) - list(APPEND _clad_extra_settings DOWNLOAD_COMMAND "") list(APPEND _clad_extra_settings SOURCE_DIR ${CLAD_SOURCE_DIR}) +else() + list(APPEND _clad_extra_settings GIT_REPOSITORY https://github.com/vgvassilev/clad.git) + list(APPEND _clad_extra_settings GIT_TAG v1.7) endif() #list(APPEND _clad_patches_list "patch1.patch" "patch2.patch") @@ -82,8 +84,6 @@ endif() ExternalProject_Add( clad - GIT_REPOSITORY https://github.com/vgvassilev/clad.git - GIT_TAG v1.7 UPDATE_COMMAND "" PATCH_COMMAND ${_clad_patch_command} CMAKE_ARGS -G ${CMAKE_GENERATOR} diff --git a/interpreter/llvm-project/llvm-project.tag b/interpreter/llvm-project/llvm-project.tag index 67e35cd34f6ce..c0902c399f49e 100644 --- a/interpreter/llvm-project/llvm-project.tag +++ b/interpreter/llvm-project/llvm-project.tag @@ -1 +1 @@ -ROOT-llvm18-20240821-01 +ROOT-llvm18-20250207-v6-34-01 diff --git a/interpreter/llvm-project/llvm/include/llvm/ADT/SmallVector.h b/interpreter/llvm-project/llvm/include/llvm/ADT/SmallVector.h index 09676d792dfeb..17444147b102a 100644 --- a/interpreter/llvm-project/llvm/include/llvm/ADT/SmallVector.h +++ b/interpreter/llvm-project/llvm/include/llvm/ADT/SmallVector.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/interpreter/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/interpreter/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h index 437a7bd6ff6c4..fd7d794849b31 100644 --- a/interpreter/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h +++ b/interpreter/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h @@ -13,6 +13,7 @@ #ifndef LLVM_LIB_TARGET_X86_MCTARGETDESC_X86MCTARGETDESC_H #define LLVM_LIB_TARGET_X86_MCTARGETDESC_X86MCTARGETDESC_H +#include #include #include diff --git a/io/io/inc/TFileMerger.h b/io/io/inc/TFileMerger.h index e4445b4d8bb9a..0336ed250b350 100644 --- a/io/io/inc/TFileMerger.h +++ b/io/io/inc/TFileMerger.h @@ -55,6 +55,8 @@ class TFileMerger : public TObject { TList fMergeList; ///< list of TObjString containing the name of the files need to be merged TList fExcessFiles; ///GetName()); + // See https://github.com/root-project/root/issues/17003 newName = new char[nch+1]; strlcpy(newName,oname,nch+1); for (Int_t i=0;iGetValue("TFile.v630forwardCompatibility", 0) == 1) + SetBit(k630forwardCompatibility); + //*-* Write Directory info Int_t namelen= TNamed::Sizeof(); Int_t nbytes = namelen + TDirectoryFile::Sizeof(); @@ -4111,13 +4116,19 @@ TFile *TFile::Open(const char *url, Option_t *options, const char *ftitle, ssize_t len = getxattr(fileurl.GetFile(), "eos.url.xroot", nullptr, 0); if (len > 0) { std::string xurl(len, 0); - if (getxattr(fileurl.GetFile(), "eos.url.xroot", &xurl[0], len) == len) { - if ((f = TFile::Open(xurl.c_str(), options, ftitle, compress, netopt))) { - if (!f->IsZombie()) { - return f; - } else { - delete f; - f = nullptr; + std::string fileNameFromUrl{fileurl.GetFile()}; + if (getxattr(fileNameFromUrl.c_str(), "eos.url.xroot", &xurl[0], len) == len) { + // Sometimes the `getxattr` call may return an invalid URL due + // to the POSIX attribute not being yet completely filled by EOS. 
+ if (auto baseName = fileNameFromUrl.substr(fileNameFromUrl.find_last_of("/") + 1); + std::equal(baseName.crbegin(), baseName.crend(), xurl.crbegin())) { + if ((f = TFile::Open(xurl.c_str(), options, ftitle, compress, netopt))) { + if (!f->IsZombie()) { + return f; + } else { + delete f; + f = nullptr; + } } } } diff --git a/io/io/src/TFileMerger.cxx b/io/io/src/TFileMerger.cxx index 6bda10a191d8a..98e2eaa38ea19 100644 --- a/io/io/src/TFileMerger.cxx +++ b/io/io/src/TFileMerger.cxx @@ -124,6 +124,15 @@ void TFileMerger::Reset() fObjectNames.Clear(); } +//////////////////////////////////////////////////////////////////////////////// +/// Closes output file + +void TFileMerger::CloseOutputFile() +{ + fOutFileWasExplicitlyClosed = true; + SafeDelete(fOutputFile); +} + //////////////////////////////////////////////////////////////////////////////// /// Add file to file merger. @@ -291,6 +300,7 @@ Bool_t TFileMerger::OutputFile(const char *outputfile, Bool_t force) { Bool_t res = OutputFile(outputfile,(force?"RECREATE":"CREATE"),1); // 1 is the same as the default from the TFile constructor. fExplicitCompLevel = kFALSE; + fOutFileWasExplicitlyClosed = false; return res; } @@ -1086,7 +1096,7 @@ Bool_t TFileMerger::OpenExcessFiles() void TFileMerger::RecursiveRemove(TObject *obj) { - if (obj == fOutputFile) { + if (obj == fOutputFile && !fOutFileWasExplicitlyClosed) { Fatal("RecursiveRemove","Output file of the TFile Merger (targeting %s) has been deleted (likely due to a TTree larger than 100Gb)", fOutputFilename.Data()); } diff --git a/io/io/test/TFileTests.cxx b/io/io/test/TFileTests.cxx index b0d48bd35989b..0a5aad45d06ed 100644 --- a/io/io/test/TFileTests.cxx +++ b/io/io/test/TFileTests.cxx @@ -9,6 +9,7 @@ #include "TPluginManager.h" #include "TROOT.h" // gROOT #include "TSystem.h" +#include "TEnv.h" // gEnv TEST(TFile, WriteObjectTObject) { @@ -125,6 +126,8 @@ void TestReadWithoutGlobalRegistrationIfPossible(const char *fname) } // https://github.com/root-project/root/issues/10742 +#ifndef R__WIN32 +// We prefer not to read remotely files from Windows, if possible TEST(TFile, ReadWithoutGlobalRegistrationWeb) { const auto webFile = "http://root.cern/files/h1/dstarmb.root"; @@ -135,3 +138,22 @@ TEST(TFile, ReadWithoutGlobalRegistrationNet) const auto netFile = "root://eospublic.cern.ch//eos/root-eos/h1/dstarmb.root"; TestReadWithoutGlobalRegistrationIfPossible(netFile); } +#endif + +// https://github.com/root-project/root/issues/16189 +TEST(TFile, k630forwardCompatibility) +{ + gEnv->SetValue("TFile.v630forwardCompatibility", 1); + const std::string filename{"filek30.root"}; + // Testing that the flag is also set when creating the file from scratch (as opposed to "UPDATE") + TFile filec{filename.c_str(),"RECREATE"}; + ASSERT_EQ(filec.TestBit(TFile::k630forwardCompatibility), true); + filec.Close(); + TFile filer{filename.c_str(),"READ"}; + ASSERT_EQ(filer.TestBit(TFile::k630forwardCompatibility), true); + filer.Close(); + TFile fileu{filename.c_str(),"UPDATE"}; + ASSERT_EQ(fileu.TestBit(TFile::k630forwardCompatibility), true); + fileu.Close(); + gSystem->Unlink(filename.c_str()); +} diff --git a/js/build/jsroot.js b/js/build/jsroot.js index de954e32b556a..7fa1e2a01cad9 100644 --- a/js/build/jsroot.js +++ b/js/build/jsroot.js @@ -1,4 +1,4 @@ -// https://root.cern/js/ v7.7.99 +// https://root.cern/js/ v7.8.2 (function (global, factory) { typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) : typeof define === 'function' && define.amd ? 
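The EOS fallback above now accepts the xattr-provided xroot URL only if it ends with the original file's base name, via std::equal over reverse iterators. The same idiom in isolation, with an explicit length guard added here for safety and an invented URL:

```cpp
#include <algorithm>
#include <cassert>
#include <string>

static bool EndsWith(const std::string &full, const std::string &suffix)
{
   return suffix.size() <= full.size() &&
          std::equal(suffix.crbegin(), suffix.crend(), full.crbegin());
}

int main()
{
   const std::string xurl = "root://eos.example.cern.ch//eos/user/a/alice/data.root";
   assert(EndsWith(xurl, "data.root"));
   assert(!EndsWith(xurl, "other.root"));
}
```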
define(['exports'], factory) : @@ -8,11 +8,11 @@ typeof define === 'function' && define.amd ? define(['exports'], factory) : var _documentCurrentScript = typeof document !== 'undefined' ? document.currentScript : null; /** @summary version id * @desc For the JSROOT release the string in format 'major.minor.patch' like '7.0.0' */ -const version_id = 'dev', +const version_id = '7.8.x', /** @summary version date * @desc Release date in format day/month/year like '14/04/2022' */ -version_date = '30/10/2024', +version_date = '10/03/2025', /** @summary version id and date * @desc Produced by concatenation of {@link version_id} and {@link version_date} @@ -109,6 +109,7 @@ if ((typeof document !== 'undefined') && (typeof window !== 'undefined') && (typ browser.chromeVersion = (browser.isChrome || browser.isChromeHeadless) ? parseInt(navigator.userAgent.match(/Chrom(?:e|ium)\/([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/)[1]) : 0; browser.isWin = navigator.userAgent.indexOf('Windows') >= 0; } + browser.android = /android/i.test(navigator.userAgent); browser.touches = ('ontouchend' in document); // identify if touch events are supported browser.screenWidth = window.screen?.width ?? 1200; } @@ -295,8 +296,8 @@ settings = { * @desc When specified, extra URL parameter like ```?stamp=unique_value``` append to each files loaded * In such case browser will be forced to load file content disregards of server cache settings * Can be disabled by providing &usestamp=false in URL or via Settings/Files sub-menu - * @default true */ - UseStamp: true, + * Disabled by default on node.js, enabled in the web browsers */ + UseStamp: !nodejs, /** @summary Maximal number of bytes ranges in http 'Range' header * @desc Some http server has limitations for number of bytes ranges therefore let change maximal number via setting * @default 200 */ @@ -1094,7 +1095,8 @@ const prROOT = 'ROOT.', clTObject = 'TObject', clTNamed = 'TNamed', clTString = clTF1 = 'TF1', clTF2 = 'TF2', clTF3 = 'TF3', clTProfile = 'TProfile', clTProfile2D = 'TProfile2D', clTProfile3D = 'TProfile3D', clTGeoVolume = 'TGeoVolume', clTGeoNode = 'TGeoNode', clTGeoNodeMatrix = 'TGeoNodeMatrix', nsREX = 'ROOT::Experimental::', nsSVG = 'http://www.w3.org/2000/svg', - kNoZoom = -1111, kNoStats = BIT(9), kInspect = 'inspect', kTitle = 'title'; + kNoZoom = -1111, kNoStats = BIT(9), kInspect = 'inspect', kTitle = 'title', + urlClassPrefix = 'https://root.cern/doc/master/class'; /** @summary Create some ROOT classes @@ -2044,6 +2046,7 @@ setHistogramTitle: setHistogramTitle, settings: settings, get source_dir () { return exports.source_dir; }, toJSON: toJSON, +urlClassPrefix: urlClassPrefix, version: version, version_date: version_date, version_id: version_id @@ -9322,7 +9325,7 @@ async function loadFontFile(fname) { if (entry?.promises !== undefined) { return new Promise(resolveFunc => { - cfg.promises.push(resolveFunc); + entry.promises.push(resolveFunc); }); } @@ -9386,6 +9389,7 @@ class FontHandler { this.size = Math.round(size || 11); this.scale = scale; + this.index = 0; this.func = this.setFont.bind(this); @@ -9393,8 +9397,11 @@ class FontHandler { if (fontIndex && isObject(fontIndex)) cfg = fontIndex; - else - cfg = root_fonts[(fontIndex && Number.isInteger(fontIndex)) ? 
Math.floor(fontIndex / 10) : 0]; + else { + if (fontIndex && Number.isInteger(fontIndex)) + this.index = Math.floor(fontIndex / 10); + cfg = root_fonts[this.index]; + } if (cfg) { this.cfg = cfg; @@ -10095,7 +10102,7 @@ function parseLatex(node, arg, label, curr) { }, createSubPos = fscale => { - return { lvl: curr.lvl + 1, x: 0, y: 0, fsize: curr.fsize*(fscale || 1), color: curr.color, font: curr.font, parent: curr, painter: curr.painter }; + return { lvl: curr.lvl + 1, x: 0, y: 0, fsize: curr.fsize*(fscale || 1), color: curr.color, font: curr.font, parent: curr, painter: curr.painter, italic: curr.italic, bold: curr.bold }; }; while (label) { @@ -10442,11 +10449,7 @@ function parseLatex(node, arg, label, curr) { const subpos = createSubPos(); - let value; - for (let c = curr; c && (value === undefined && c); c = c.parent) - value = c[found.bi]; - - subpos[found.bi] = !value; + subpos[found.bi] = !subpos[found.bi]; parseLatex(currG(), arg, sublabel, subpos); @@ -13037,9 +13040,10 @@ class ObjectPainter extends BasePainter { let cl = this.getClassName(); const p = cl.lastIndexOf('::'); if (p > 0) cl = cl.slice(p+2); - const title = (cl && name) ? `${cl}:${name}` : (cl || name || 'object'); + const hdr = (cl && name) ? `${cl}:${name}` : (cl || name || 'object'), + url = (p < 0) ? `${urlClassPrefix}${cl}.html` : ''; - menu.header(title); + menu.header(hdr, url); const size0 = menu.size(); @@ -56583,7 +56587,7 @@ function createSVGRenderer(as_is, precision, doc) { _textSizeAttr = `viewBox="${wrap.svg_attr.viewBox}" width="${wrap.svg_attr.width}" height="${wrap.svg_attr.height}"`, _textClearAttr = wrap.svg_style.backgroundColor ? ` style="background:${wrap.svg_style.backgroundColor}"` : ''; - return `${wrap.accPath}`; + return `${wrap.accPath}`; }; rndr.fillTargetSVG = function(svg) { @@ -57643,6 +57647,21 @@ function createOrbitControl(painter, camera, scene, renderer, lookat) { const intersects = this.getMouseIntersects(mouse_pos); this.processSingleClick(intersects); } + + if (kind === 3) { + const intersects = this.getMouseIntersects(mouse_pos); + let objpainter = null; + for (let i = 0; !objpainter && (i < intersects.length); ++i) { + const obj3d = intersects[i].object; + objpainter = obj3d.painter || obj3d.parent?.painter; // check one top level + } + if (objpainter) { + // while axis painter not directly appears in the list of primitives, pad and canvas take from frame + const padp = this.painter?.getPadPainter(), + canvp = this.painter?.getCanvPainter(); + canvp?.producePadEvent('select', padp, objpainter); + } + } }; control.lstn_click = function(evnt) { @@ -57656,9 +57675,11 @@ function createOrbitControl(painter, camera, scene, renderer, lookat) { let kind = 0; if (isFunc(this.painter?.getFramePainter()?._click_handler)) - kind = 1; // user click handler + kind = 1; // user click handler else if (this.processSingleClick && this.painter?.options?.mouse_click) kind = 2; // eve7 click handler + else if (this.painter?.getCanvPainter()) + kind = 3; // select event for GED // if normal event, set longer timeout waiting if double click not detected if (kind) @@ -57880,7 +57901,7 @@ class PointsCreator { const handler = new TAttMarkerHandler({ style: args.style, color: args.color, size: 7 }), w = handler.fill ? 1 : 7, - imgdata = '' + + imgdata = `` + ``+ '', dataUrl = prSVG + (isNodeJs() ? 
imgdata : encodeURIComponent(imgdata)); @@ -60822,8 +60843,8 @@ class JSRootMenu { } /** @summary Add menu header - must be first entry */ - header(name) { - this.add(sHeader + name); + header(name, title) { + this.add(sHeader + name, undefined, undefined, title); } /** @summary Add draw sub-menu with draw options @@ -61463,8 +61484,10 @@ class JSRootMenu { }); this.addchk(faxis.TestBit(EAxisBits.kCenterTitle), 'Center', arg => { faxis.InvertBit(EAxisBits.kCenterTitle); painter.interactiveRedraw('pad', `exec:CenterTitle(${arg})`, kind); }); - this.addchk(faxis.TestBit(EAxisBits.kOppositeTitle), 'Opposite', - () => { faxis.InvertBit(EAxisBits.kOppositeTitle); painter.redrawPad(); }); + if (!painter?.snapid) { + this.addchk(faxis.TestBit(EAxisBits.kOppositeTitle), 'Opposite', + () => { faxis.InvertBit(EAxisBits.kOppositeTitle); painter.redrawPad(); }); + } this.addchk(faxis.TestBit(EAxisBits.kRotateTitle), 'Rotate', arg => { faxis.InvertBit(EAxisBits.kRotateTitle); painter.interactiveRedraw('pad', is_gaxis ? `exec:SetBit(TAxis::kRotateTitle, ${arg})` : `exec:RotateTitle(${arg})`, kind); }); if (is_gaxis) { @@ -61892,7 +61915,7 @@ class StandaloneMenu extends JSRootMenu { return curr.push({ divider: true }); if (name.indexOf(sHeader) === 0) - return curr.push({ text: name.slice(sHeader.length), header: true }); + return curr.push({ text: name.slice(sHeader.length), header: true, title }); if (name === sEndsub) { this.stack.pop(); @@ -62000,16 +62023,50 @@ class StandaloneMenu extends JSRootMenu { if (d.header) { item.style = 'background-color: lightblue; padding: 3px 7px; font-weight: bold; border-bottom: 1px;'; - item.innerHTML = d.text; + + let url = '', title = ''; + if (d.title) { + const p = d.title.indexOf('https://'); + if (p >= 0) { + url = d.title.slice(p); + title = d.title.slice(0, p); + } else + title = d.title; + } + if (!url) + item.innerHTML = d.text; + else { + item.style.display = 'flex'; + item.style['justify-content'] = 'space-between'; + + const txt = doc.createElement('span'); + txt.innerHTML = d.text; + txt.style = 'display: inline-block; margin: 0;'; + item.appendChild(txt); + + const anchor = doc.createElement('span'); + anchor.style = 'margin: 0; color: blue; opacity: 0.1; margin-left: 7px; right: 3px; display: inline-block; cursor: pointer;'; + anchor.textContent = '?'; + anchor.title = url; + anchor.addEventListener('click', () => { + const cp = this.painter?.getCanvPainter(); + if (cp?.canSendWebSocket()) + cp.sendWebsocket(`SHOWURL:${url}`); + else + window.open(url); + }); + anchor.addEventListener('mouseenter', () => { anchor.style.opacity = 1; }); + anchor.addEventListener('mouseleave', () => { anchor.style.opacity = 0.1; }); + item.appendChild(anchor); + } + if (title) + item.setAttribute('title', title); + return; } const hovArea = doc.createElement('div'); - hovArea.style.width = '100%'; - hovArea.style.height = '100%'; - hovArea.style.display = 'flex'; - hovArea.style.justifyContent = 'space-between'; - hovArea.style.cursor = 'pointer'; + hovArea.style = 'width: 100%; height: 100%; display: flex; justify-content: space-between; cursor: pointer;'; if (d.title) hovArea.setAttribute('title', d.title); item.appendChild(hovArea); @@ -62066,7 +62123,7 @@ class StandaloneMenu extends JSRootMenu { if (d.extraText || d.sub) { const extraText = doc.createElement('span'); extraText.className = 'jsroot_ctxt_extraText'; - extraText.style = 'margin: 0; padding: 3px 7px; color: rgb(0, 0, 0, 0.6);'; + extraText.style = 'margin: 0; padding: 3px 7px; color: rgba(0, 0, 0, 
0.6);'; extraText.textContent = d.sub ? '\u25B6' : d.extraText; hovArea.appendChild(extraText); @@ -62753,6 +62810,9 @@ class TAxisPainter extends ObjectPainter { /** @summary cleanup painter */ cleanup() { this.cleanupAxisPainter(); + delete this.hist_painter; + delete this.hist_axis; + delete this.is_gaxis; super.cleanup(); } @@ -63134,12 +63194,22 @@ class TAxisPainter extends ObjectPainter { return this.getObject()?.TestBit(EAxisBits.kCenterLabels); } + /** @summary Is labels should be rotated */ + isRotateLabels() { + return this.getObject()?.TestBit(EAxisBits.kLabelsVert); + } + + /** @summary Is title should be rotated */ + isRotateTitle() { + return this.getObject()?.TestBit(EAxisBits.kRotateTitle); + } + /** @summary Add interactive elements to draw axes title */ addTitleDrag(title_g, vertical, offset_k, reverse, axis_length) { if (!settings.MoveResize || this.isBatchMode()) return; - let drag_rect = null, - acc_x, acc_y, new_x, new_y, sign_0, alt_pos, curr_indx; + let drag_rect = null, x_0, y_0, i_0, + acc_x, acc_y, new_x, new_y, sign_0, alt_pos, curr_indx, can_indx0 = true; const drag_move = drag().subject(Object); drag_move.on('start', evnt => { @@ -63149,10 +63219,11 @@ class TAxisPainter extends ObjectPainter { const box = title_g.node().getBBox(), // check that elements visible, request precise value title_length = vertical ? box.height : box.width; - new_x = acc_x = title_g.property('shift_x'); - new_y = acc_y = title_g.property('shift_y'); + x_0 = new_x = acc_x = title_g.property('shift_x'); + y_0 = new_y = acc_y = title_g.property('shift_y'); sign_0 = vertical ? (acc_x > 0) : (acc_y > 0); // sign should remain + can_indx0 = !this.hist_painter?.snapid; // online canvas does not allow alternate position alt_pos = vertical ? [axis_length, axis_length/2, 0] : [0, axis_length/2, axis_length]; // possible positions const off = vertical ? -title_length/2 : title_length/2; @@ -63169,12 +63240,13 @@ class TAxisPainter extends ObjectPainter { if (this.titleCenter) curr_indx = 1; - else if (reverse ^ this.titleOpposite) + else if ((reverse ^ this.titleOpposite) && can_indx0) curr_indx = 0; else curr_indx = 2; alt_pos[curr_indx] = vertical ? acc_y : acc_x; + i_0 = curr_indx; drag_rect = title_g.append('rect') .attr('x', box.x) @@ -63193,11 +63265,13 @@ class TAxisPainter extends ObjectPainter { acc_x += evnt.dx; acc_y += evnt.dy; - let set_x, set_y, besti = 0; + let set_x, set_y, besti = can_indx0 ? 0 : 1; const p = vertical ? acc_y : acc_x; - for (let i = 1; i < 3; ++i) - if (Math.abs(p - alt_pos[i]) < Math.abs(p - alt_pos[besti])) besti = i; + for (let i = 1; i < 3; ++i) { + if (Math.abs(p - alt_pos[i]) < Math.abs(p - alt_pos[besti])) + besti = i; + } if (vertical) { set_x = acc_x; @@ -63208,7 +63282,9 @@ class TAxisPainter extends ObjectPainter { } if (sign_0 === (vertical ? 
(set_x > 0) : (set_y > 0))) { - new_x = set_x; new_y = set_y; curr_indx = besti; + new_x = set_x; + new_y = set_y; + curr_indx = besti; makeTranslate(title_g, new_x, new_y); } }).on('end', evnt => { @@ -63242,10 +63318,14 @@ class TAxisPainter extends ObjectPainter { setBit(EAxisBits.kOppositeTitle, false); this.titleOpposite = false; } - this.submitAxisExec(`SetTitleOffset(${offset});;SetBit(${EAxisBits.kCenterTitle},${this.titleCenter?1:0})`); - drag_rect.remove(); drag_rect = null; + + if ((x_0 !== new_x) || (y_0 !== new_y) || (i_0 !== curr_indx)) + this.submitAxisExec(`SetTitleOffset(${offset});;SetBit(${EAxisBits.kCenterTitle},${this.titleCenter?1:0})`); + + if (this.hist_painter && this.hist_axis) + this.hist_painter.getCanvPainter()?.producePadEvent('select', this.hist_painter.getPadPainter(), this); }); title_g.style('cursor', 'move').call(drag_move); @@ -63327,7 +63407,7 @@ class TAxisPainter extends ObjectPainter { label_g = [axis_g.append('svg:g').attr('class', 'axis_labels')], lbl_pos = handle.lbl_pos || handle.major, tilt_angle = gStyle.AxisTiltAngle ?? 25; - let rotate_lbls = axis.TestBit(EAxisBits.kLabelsVert), + let rotate_lbls = this.isRotateLabels(), textscale = 1, flipscale = 1, maxtextlen = 0, applied_scale = 0, lbl_tilt = false, any_modified = false, max_textwidth = 0, max_tiltsize = 0; @@ -63627,9 +63707,6 @@ class TAxisPainter extends ObjectPainter { if (this.is_gaxis) draw_lines = axis.fLineColor !== 0; - // indicate that attributes created not for TAttLine, therefore cannot be updated as TAttLine in GED - this.lineatt.not_standard = true; - if (!this.is_gaxis || (this.name === 'zaxis')) { axis_g = layer.selectChild(`.${this.name}_container`); if (axis_g.empty()) @@ -63717,7 +63794,7 @@ class TAxisPainter extends ObjectPainter { if (!title_g) return; - const rotate = axis.TestBit(EAxisBits.kRotateTitle) ? -1 : 1, + const rotate = this.isRotateTitle() ? -1 : 1, xor_reverse = swap_side ^ this.titleOpposite, myxor = (rotate < 0) ^ xor_reverse; let title_offest_k = side; @@ -64666,7 +64743,8 @@ const TooltipHandler = { if (exact) { const handler = dblckick ? this._dblclick_handler : this._click_handler; - if (handler) res = handler(exact.user_info, pnt); + if (isFunc(handler)) + res = handler(exact.user_info, pnt); } if (!dblckick) { @@ -64931,10 +65009,10 @@ const TooltipHandler = { this.processFrameClick(pnt); break; case 2: - this.getPadPainter()?.selectObjectPainter(this, null, 'xaxis'); + this.getPadPainter()?.selectObjectPainter(this.x_handle); break; case 3: - this.getPadPainter()?.selectObjectPainter(this, null, 'yaxis'); + this.getPadPainter()?.selectObjectPainter(this.y_handle); break; } @@ -66243,9 +66321,14 @@ class TFramePainter extends ObjectPainter { this._frame_height = h; this._frame_rotate = rotate; this._frame_fixpos = fixpos; + this._frame_trans = trans; - if (this.mode3d) return this; // no need to create any elements in 3d mode + return this.mode3d ? 
this : this.createFrameG(); + } + /** @summary Create frame element and update all attributes + * @private */ + createFrameG() { // this is svg:g object - container for every other items belonging to frame this.draw_g = this.getFrameSvg(); @@ -66275,15 +66358,15 @@ class TFramePainter extends ObjectPainter { this.axes_drawn = this.axes2_drawn = false; - this.draw_g.attr('transform', trans); + this.draw_g.attr('transform', this._frame_trans); - top_rect.attr('d', `M0,0H${w}V${h}H0Z`) + top_rect.attr('d', `M0,0H${this._frame_width}V${this._frame_height}H0Z`) .call(this.fillatt.func) .call(this.lineatt.func); - main_svg.attr('width', w) - .attr('height', h) - .attr('viewBox', `0 0 ${w} ${h}`); + main_svg.attr('width', this._frame_width) + .attr('height', this._frame_height) + .attr('viewBox', `0 0 ${this._frame_width} ${this._frame_height}`); return this; } @@ -66339,7 +66422,9 @@ class TFramePainter extends ObjectPainter { handle = this[`${kind}_handle`]; if (!isFunc(faxis?.TestBit)) return false; - menu.header(`${kind.toUpperCase()} axis`); + const hist_painter = handle?.hist_painter || main; + + menu.header(`${kind.toUpperCase()} axis`, `${urlClassPrefix}${clTAxis}.html`); menu.sub('Range'); menu.add('Zoom', () => { @@ -66389,8 +66474,8 @@ class TFramePainter extends ObjectPainter { } menu.addchk(faxis.TestBit(EAxisBits.kMoreLogLabels), 'More log', flag => { faxis.InvertBit(EAxisBits.kMoreLogLabels); - if (main?.snapid && (kind.length === 1)) - main.interactiveRedraw('pad', `exec:SetMoreLogLabels(${flag})`, kind); + if (hist_painter?.snapid && (kind.length === 1)) + hist_painter.interactiveRedraw('pad', `exec:SetMoreLogLabels(${flag})`, kind); else this.interactiveRedraw('pad'); }); @@ -66399,23 +66484,23 @@ class TFramePainter extends ObjectPainter { faxis.InvertBit(EAxisBits.kNoExponent); if (handle) handle.noexp_changed = true; this[`${kind}_noexp_changed`] = true; - if (main?.snapid && (kind.length === 1)) - main.interactiveRedraw('pad', `exec:SetNoExponent(${flag})`, kind); + if (hist_painter?.snapid && (kind.length === 1)) + hist_painter.interactiveRedraw('pad', `exec:SetNoExponent(${flag})`, kind); else this.interactiveRedraw('pad'); }); - if ((kind === 'z') && isFunc(main?.fillPaletteMenu)) - main.fillPaletteMenu(menu, !is_pal); + if ((kind === 'z') && isFunc(hist_painter?.fillPaletteMenu)) + hist_painter.fillPaletteMenu(menu, !is_pal); - menu.addTAxisMenu(EAxisBits, main || this, faxis, kind, handle, this); + menu.addTAxisMenu(EAxisBits, hist_painter || this, faxis, kind, handle, this); return true; } const alone = menu.size() === 0; if (alone) - menu.header('Frame'); + menu.header('Frame', `${urlClassPrefix}${clTFrame}.html`); else menu.separator(); @@ -69106,27 +69191,28 @@ class TPadPainter extends ObjectPainter { /** @summary Generate pad events, normally handled by GED * @desc in pad painter, while pad may be drawn without canvas * @private */ - producePadEvent(what, padpainter, painter, position, place) { + producePadEvent(what, padpainter, painter, position) { if ((what === 'select') && isFunc(this.selectActivePad)) this.selectActivePad(padpainter, painter, position); if (isFunc(this.pad_events_receiver)) - this.pad_events_receiver({ what, padpainter, painter, position, place }); + this.pad_events_receiver({ what, padpainter, painter, position }); } /** @summary method redirect call to pad events receiver */ - selectObjectPainter(painter, pos, place) { + selectObjectPainter(painter, pos) { const istoppad = this.iscan || !this.has_canvas, - canp = istoppad ? 
this : this.getCanvPainter(); + canp = istoppad ? this : this.getCanvPainter(); - if (painter === undefined) painter = this; + if (painter === undefined) + painter = this; if (pos && !istoppad) pos = getAbsPosInCanvas(this.svg_this_pad(), pos); selectActivePad({ pp: this, active: true }); - canp?.producePadEvent('select', this, painter, pos, place); + canp?.producePadEvent('select', this, painter, pos); } /** @summary Draw pad active border @@ -69275,8 +69361,10 @@ class TPadPainter extends ObjectPainter { this.createAttFill({ attr: this.pad }); if ((rect.width <= lmt) || (rect.height <= lmt)) { - svg.style('display', 'none'); - console.warn(`Hide canvas while geometry too small w=${rect.width} h=${rect.height}`); + if (this.snapid === undefined) { + svg.style('display', 'none'); + console.warn(`Hide canvas while geometry too small w=${rect.width} h=${rect.height}`); + } if (this._pad_width && this._pad_height) { // use last valid dimensions rect.width = this._pad_width; @@ -69855,9 +69943,9 @@ class TPadPainter extends ObjectPainter { * @private */ fillContextMenu(menu) { if (this.pad) - menu.header(`${this.pad._typename}::${this.pad.fName}`); + menu.header(`${this.pad._typename}::${this.pad.fName}`, `${urlClassPrefix}${this.pad._typename}.html`); else - menu.header('Canvas'); + menu.header('Canvas', `${urlClassPrefix}${clTCanvas}.html`); menu.addchk(this.isTooltipAllowed(), 'Show tooltips', () => this.setTooltipAllowed('toggle')); @@ -70472,7 +70560,7 @@ class TPadPainter extends ObjectPainter { const mainid = this.selectDom().attr('id'); - if (!this.isBatchMode() && !this.use_openui && !this.brlayout && mainid && isStr(mainid)) { + if (!this.isBatchMode() && this.online_canvas && !this.use_openui && !this.brlayout && mainid && isStr(mainid) && !getHPainter()) { this.brlayout = new BrowserLayout(mainid, null, this); this.brlayout.create(mainid, true); this.setDom(this.brlayout.drawing_divid()); // need to create canvas @@ -70941,7 +71029,7 @@ class TPadPainter extends ObjectPainter { const arg = (file_format === 'pdf') ? 
{ node: elem.node(), width, height, reset_tranform: use_frame } - : compressSVG(`${elem.node().innerHTML}`); + : compressSVG(`${elem.node().innerHTML}`); return svgToImage(arg, file_format, args).then(res => { // reactivate border @@ -71463,6 +71551,10 @@ class TCanvasPainter extends TPadPainter { return this.sendWebsocket(`OBJEXEC:${snapid}:${exec}`); } + /** @summary Return true if message can be send via web socket + * @private */ + canSendWebSocket() { return this._websocket?.canSend(); } + /** @summary Send text message with web socket * @desc used for communication with server-side of web canvas * @private */ @@ -71780,7 +71872,6 @@ class TCanvasPainter extends TPadPainter { objpainter?.getPadPainter()?.selectObjectPainter(objpainter); - console.log('activate GED'); this.processChanges('sbits', this); resolveFunc(true); @@ -71830,11 +71921,17 @@ class TCanvasPainter extends TPadPainter { if (this._all_sections_showed) return; this._all_sections_showed = true; + + // used in Canvas.controller.js to avoid browser resize because of initial sections show/hide + this._ignore_section_resize = true; + this.showSection('Menu', this.pad.TestBit(kMenuBar)); this.showSection('StatusBar', this.pad.TestBit(kShowEventStatus)); this.showSection('ToolBar', this.pad.TestBit(kShowToolBar)); this.showSection('Editor', this.pad.TestBit(kShowEditor)); this.showSection('ToolTips', this.pad.TestBit(kShowToolTips) || this._highlight_connect); + + this._ignore_section_resize = false; } /** @summary Handle highlight in canvas - deliver information to server @@ -72214,7 +72311,7 @@ class TPavePainter extends ObjectPainter { svg_code = compressSVG(svg_code); - svg_code = '= 0), postpone_draw = isStr(arg) && (arg.indexOf('postpone') >= 0), cjust = isStr(arg) && (arg.indexOf('cjust') >= 0), + bring_stats_front = isStr(arg) && (arg.indexOf('bring_stats_front') >= 0), pp = this.getPadPainter(), width = pp.getPadWidth(), height = pp.getPadHeight(), @@ -73125,6 +73223,9 @@ class TPavePainter extends ObjectPainter { } } + if (bring_stats_front) + this.getPadPainter()?.findPainterFor(null, '', clTPaveStats)?.bringToFront(); + return this.z_handle.drawAxis(this.draw_g, s_width, s_height, axis_transform, axis_second).then(() => { let rect; if (can_move) { @@ -73563,18 +73664,20 @@ class TPavePainter extends ObjectPainter { } else if ((opt === 'postitle') || painter.isDummyPos(pave)) { const st = gStyle, fp = painter.getFramePainter(); if (st && fp) { - const midx = st.fTitleX, y2 = st.fTitleY, fsz = st.fTitleFontSize; - let w = st.fTitleW, h = st.fTitleH; - - if (!h) h = Math.max((y2 - fp.fY2NDC) * 0.7, (fsz < 1) ? 1.1 * fsz : 1.1 * fsz / fp.getFrameWidth()); - if (!w) w = fp.fX2NDC - fp.fX1NDC; + const midx = st.fTitleX, y2 = st.fTitleY, + valign = st.fTitleAlign % 10, halign = (st.fTitleAlign - valign) / 10, + title = pave.fLines?.arr[0]?.fTitle; + let w = st.fTitleW, h = st.fTitleH, fsz = st.fTitleFontSize; + if (fsz > 1) fsz = fsz / fp.getFrameWidth(); + if (!h) h = Math.max((y2 - fp.fY2NDC) * 0.7, 1.1 * fsz); + if (!w) w = (halign !== 2 && title) ? title.length * fsz * 0.2 : fp.fX2NDC - fp.fX1NDC; if (!Number.isFinite(h) || (h <= 0)) h = 0.06; if (!Number.isFinite(w) || (w <= 0)) w = 0.44; - pave.fX1NDC = midx - w/2; - pave.fY1NDC = y2 - h; - pave.fX2NDC = midx + w/2; - pave.fY2NDC = y2; + pave.fX1NDC = halign < 2 ? midx : (halign > 2 ? midx - w : midx - w/2); + pave.fY1NDC = valign === 3 ? y2 - h : (valign === 2 ? 
y2 - h / 2 : y2); + pave.fX2NDC = pave.fX1NDC + w; + pave.fY2NDC = pave.fY1NDC + h; pave.fInit = 1; } } @@ -73690,6 +73793,9 @@ class THistDrawOptions { /** @summary Is palette can be used with current draw options */ canHavePalette() { + if (this.ndim === 3) + return this.BoxStyle === 12 || this.BoxStyle === 13 || this.GLBox === 12; + if (this.ndim !== 2) return false; @@ -73784,7 +73890,7 @@ class THistDrawOptions { if (d.check('OPTSTAT', true)) this.optstat = d.partAsInt(); if (d.check('OPTFIT', true)) this.optfit = d.partAsInt(); - if ((this.optstat || this.optstat) && histo?.TestBit(kNoStats)) + if ((this.optstat || this.optfit) && histo?.TestBit(kNoStats)) histo?.InvertBit(kNoStats); if (d.check('NOSTAT')) this.NoStat = true; @@ -75507,6 +75613,7 @@ class THistPainter extends ObjectPainter { case 'ToggleLogY': return fp.toggleAxisLog('y'); case 'ToggleLogZ': return fp.toggleAxisLog('z'); case 'ToggleStatBox': return getPromise(this.toggleStat()); + case 'ToggleColorZ': return this.toggleColz(); } return false; } @@ -75807,7 +75914,7 @@ class THistPainter extends ObjectPainter { // TODO: use weak reference (via pad list of painters and any kind of string) pal.$main_painter = this; - let arg = '', pr; + let arg = 'bring_stats_front', pr; if (postpone_draw) arg += ';postpone'; if (can_move && !this.do_redraw_palette) arg += ';can_move'; if (this.options.Cjust) arg += ';cjust'; @@ -76040,8 +76147,8 @@ class THistPainter extends ObjectPainter { } // find min/max values in selected range - - this.maxbin = this.minbin = this.minposbin = null; + let is_first = true; + this.minposbin = 0; for (i = res.i1; i < res.i2; ++i) { for (j = res.j1; j < res.j2; ++j) { @@ -76051,20 +76158,24 @@ class THistPainter extends ObjectPainter { binarea = (res.grx[i+1]-res.grx[i])*(res.gry[j]-res.gry[j+1]); if (binarea <= 0) continue; res.max = Math.max(res.max, binz); - if ((binz > 0) && ((binz 0) && ((binz < res.min) || (res.min === 0))) res.min = binz; binz = binz/binarea; } - if (this.maxbin === null) + if (is_first) { this.maxbin = this.minbin = binz; - else { + is_first = false; + } else { this.maxbin = Math.max(this.maxbin, binz); this.minbin = Math.min(this.minbin, binz); } - if (binz > 0) - if ((this.minposbin === null) || (binz < this.minposbin)) this.minposbin = binz; + if ((binz > 0) && ((this.minposbin === 0) || (binz < this.minposbin))) + this.minposbin = binz; } } + if (is_first) + this.maxbin = this.minbin = 0; + // force recalculation of z levels this.fContour = null; @@ -76899,7 +77010,6 @@ let TH2Painter$2 = class TH2Painter extends THistPainter { if (this.isMainPainter()) { switch (funcname) { case 'ToggleColor': return this.toggleColor(); - case 'ToggleColorZ': return this.toggleColz(); case 'Toggle3D': return this.toggleMode3D(); } } @@ -78603,7 +78713,7 @@ let TH2Painter$2 = class TH2Painter extends THistPainter { if (this.maxbin > 0.7) factor = 0.7/this.maxbin; const nlevels = Math.round(handle.max - handle.min), - cntr = this.createContour((nlevels > 50) ? 50 : nlevels, this.minposbin, this.maxbin, this.minposbin); + cntr = this.createContour((nlevels > 50) ? 
50 : nlevels, this.minposbin, this.maxbin, this.minposbin); // now start build for (i = handle.i1; i < handle.i2; ++i) { @@ -78802,11 +78912,12 @@ let TH2Painter$2 = class TH2Painter extends THistPainter { if (this.options.Circular > 11) { for (let i = 0; i < nbins - 1; ++i) { for (let j = i+1; j < nbins; ++j) { - const cont = hist.getBinContent(i+1, j+1); - if (cont > 0) { - max_value = Math.max(max_value, cont); - if (!min_value || (cont < min_value)) min_value = cont; - } + const cont = hist.getBinContent(i+1, j+1); + if (cont > 0) { + max_value = Math.max(max_value, cont); + if (!min_value || (cont < min_value)) + min_value = cont; + } } } } @@ -79013,9 +79124,8 @@ let TH2Painter$2 = class TH2Painter extends THistPainter { histo = this.getHisto(); return [this.getObjectHint(), - p.swapXY - ? 'y = ' + funcs.axisAsText('y', histo.fYaxis.GetBinLowEdge(p.bin+1)) - : 'x = ' + funcs.axisAsText('x', histo.fXaxis.GetBinLowEdge(p.bin+1)), + p.swapXY ? 'y = ' + funcs.axisAsText('y', histo.fYaxis.GetBinLowEdge(p.bin+1)) + : 'x = ' + funcs.axisAsText('x', histo.fXaxis.GetBinLowEdge(p.bin+1)), 'm-25% = ' + floatToString(p.fBoxDown, gStyle.fStatFormat), 'median = ' + floatToString(p.fMedian, gStyle.fStatFormat), 'm+25% = ' + floatToString(p.fBoxUp, gStyle.fStatFormat)]; @@ -79271,13 +79381,16 @@ let TH2Painter$2 = class TH2Painter extends THistPainter { return null; } - const res = { name: histo.fName, title: histo.fTitle, - x: pnt.x, y: pnt.y, - color1: this.lineatt?.color ?? 'green', - color2: this.fillatt?.getFillColorAlt('blue') ?? 'blue', - lines: this.getBinTooltips(i, j), exact: true, menu: true }; + const res = { + name: histo.fName, title: histo.fTitle, + x: pnt.x, y: pnt.y, + color1: this.lineatt?.color ?? 'green', + color2: this.fillatt?.getFillColorAlt('blue') ?? 'blue', + lines: this.getBinTooltips(i, j), exact: true, menu: true + }; - if (this.options.Color) res.color2 = this.getHistPalette().getColor(colindx); + if (this.options.Color) + res.color2 = this.getHistPalette().getColor(colindx); if (pnt.disabled && !this.is_projection) { ttrect.remove(); @@ -79949,6 +80062,9 @@ function create3DScene(render3d, x3dscale, y3dscale, orthographic) { this.mode3d = false; + if (this.draw_g) + this.createFrameG(); + return; } @@ -80317,7 +80433,9 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (opts.v7) { this.x_handle.pad_name = this.pad_name; this.x_handle.snapid = this.snapid; - } + } else if (opts.hist_painter) + this.x_handle.setHistPainter(opts.hist_painter, 'x'); + this.x_handle.configureAxis('xaxis', this.xmin, this.xmax, xmin, xmax, false, [grminx, grmaxx], { log: pad?.fLogx ?? 0, reverse: opts.reverse_x, logcheckmin: true }); this.x_handle.assignFrameMembers(this, 'x'); @@ -80327,7 +80445,8 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (opts.v7) { this.y_handle.pad_name = this.pad_name; this.y_handle.snapid = this.snapid; - } + } else if (opts.hist_painter) + this.y_handle.setHistPainter(opts.hist_painter, 'y'); this.y_handle.configureAxis('yaxis', this.ymin, this.ymax, ymin, ymax, false, [grminy, grmaxy], { log: pad && !opts.use_y_for_z ? 
pad.fLogy : 0, reverse: opts.reverse_y, logcheckmin: opts.ndim > 1 }); this.y_handle.assignFrameMembers(this, 'y'); @@ -80337,7 +80456,8 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (opts.v7) { this.z_handle.pad_name = this.pad_name; this.z_handle.snapid = this.snapid; - } + } else if (opts.hist_painter) + this.z_handle.setHistPainter(opts.hist_painter, 'z'); this.z_handle.configureAxis('zaxis', this.zmin, this.zmax, zmin, zmax, false, [grminz, grmaxz], { value_axis: (opts.ndim === 1) || (opts.ndim === 2), log: ((opts.use_y_for_z || (opts.ndim === 2)) ? pad?.fLogv : undefined) ?? pad?.fLogz ?? 0, @@ -80374,7 +80494,9 @@ function drawXYZ(toplevel, AxisPainter, opts) { top.axis_draw = true; // mark element as axis drawing toplevel.add(top); - let ticks = [], lbls = [], maxtextheight = 0; + let ticks = [], lbls = [], maxtextheight = 0, maxtextwidth = 0; + const center_x = this.x_handle.isCenteredLabels(), + rotate_x = this.x_handle.isRotateLabels(); while (xticks.next()) { const grx = xticks.grpos; @@ -80387,7 +80509,7 @@ function drawXYZ(toplevel, AxisPainter, opts) { is_major = false; lbl = ''; } - if (is_major && lbl && opts.draw) { + if (is_major && lbl && opts.draw && (!center_x || !xticks.last_major())) { const mod = xticks.get_modifier(); if (mod?.fLabText) lbl = mod.fLabText; @@ -80399,6 +80521,7 @@ function drawXYZ(toplevel, AxisPainter, opts) { text3d.offsety = this.x_handle.labelsOffset + (grmaxy - grminy) * 0.005; + maxtextwidth = Math.max(maxtextwidth, draw_width); maxtextheight = Math.max(maxtextheight, draw_height); if (mod?.fTextColor) text3d.color = this.getColor(mod.fTextColor); @@ -80411,8 +80534,10 @@ function drawXYZ(toplevel, AxisPainter, opts) { if ((draw_width > 0) && (space > 0)) text_scale = Math.min(text_scale, 0.9*space/draw_width); } + if (rotate_x) + text3d.rotate = 1; - if (this.x_handle.isCenteredLabels()) { + if (center_x) { if (!space) space = Math.min(grx - grminx, grmaxx - grx); text3d.grx += space/2; } @@ -80429,6 +80554,9 @@ function drawXYZ(toplevel, AxisPainter, opts) { text3d.offsety = 1.6 * this.x_handle.titleOffset + (grmaxy - grminy) * 0.005; text3d.grx = (grminx + grmaxx)/2; // default position for centered title text3d.kind = 'title'; + if (this.x_handle.isRotateTitle()) + text3d.rotate = 2; + lbls.push(text3d); } @@ -80547,6 +80675,7 @@ function drawXYZ(toplevel, AxisPainter, opts) { xcont.position.set(0, grminy, grminz); xcont.rotation.x = 1/4*Math.PI; xcont.xyid = 2; + xcont.painter = this.x_handle; if (opts.draw) { xtickslines = createLineSegments(ticks, getLineMaterial(this.x_handle, 'ticks')); @@ -80554,17 +80683,28 @@ function drawXYZ(toplevel, AxisPainter, opts) { } lbls.forEach(lbl => { - const w = lbl.boundingBox.max.x - lbl.boundingBox.min.x, - posx = lbl.center ? lbl.grx - w/2 : (lbl.opposite ? grminx : grmaxx - w), - m = new THREE.Matrix4(); + const dx = lbl.boundingBox.max.x - lbl.boundingBox.min.x, + dy = lbl.boundingBox.max.y - lbl.boundingBox.min.y, + w = (lbl.rotate === 1) ? dy : dx, + posx = lbl.center ? lbl.grx - w/2 : (lbl.opposite ? grminx : grmaxx - w), + posy = -text_scale * (lbl.rotate === 1 ? 
maxtextwidth : maxtextheight) - this.x_handle.ticksSize - lbl.offsety, + m = new THREE.Matrix4(); // matrix to swap y and z scales and shift along z to its position m.set(text_scale, 0, 0, posx, - 0, text_scale, 0, -maxtextheight*text_scale - this.x_handle.ticksSize - lbl.offsety, + 0, text_scale, 0, posy, 0, 0, 1, 0, 0, 0, 0, 1); const mesh = new THREE.Mesh(lbl, getTextMaterial(this.x_handle, lbl.kind, lbl.color)); + + if (lbl.rotate) + mesh.rotateZ(lbl.rotate * Math.PI / 2); + if (lbl.rotate === 1) + mesh.translateY(-dy); + if (lbl.rotate === 2) + mesh.translateX(-dx); + mesh.applyMatrix4(m); xcont.add(mesh); }); @@ -80576,21 +80716,32 @@ function drawXYZ(toplevel, AxisPainter, opts) { xcont = new THREE.Object3D(); xcont.position.set(0, grmaxy, grminz); xcont.rotation.x = 3/4*Math.PI; + xcont.painter = this.x_handle; if (opts.draw) xcont.add(new THREE.LineSegments(xtickslines.geometry, xtickslines.material)); lbls.forEach(lbl => { - const w = lbl.boundingBox.max.x - lbl.boundingBox.min.x, - posx = (lbl.center ? lbl.grx + w/2 : lbl.opposite ? grminx + w : grmaxx), + const dx = lbl.boundingBox.max.x - lbl.boundingBox.min.x, + dy = lbl.boundingBox.max.y - lbl.boundingBox.min.y, + w = (lbl.rotate === 1) ? dy : dx, + posx = lbl.center ? lbl.grx + w/2 : (lbl.opposite ? grminx + w: grmaxx), + posy = -text_scale * (lbl.rotate === 1 ? maxtextwidth : maxtextheight) - this.x_handle.ticksSize - lbl.offsety, m = new THREE.Matrix4(); // matrix to swap y and z scales and shift along z to its position m.set(-text_scale, 0, 0, posx, - 0, text_scale, 0, -maxtextheight*text_scale - this.x_handle.ticksSize - lbl.offsety, + 0, text_scale, 0, posy, 0, 0, -1, 0, 0, 0, 0, 1); + const mesh = new THREE.Mesh(lbl, getTextMaterial(this.x_handle, lbl.kind, lbl.color)); + if (lbl.rotate) + mesh.rotateZ(lbl.rotate * Math.PI / 2); + if (lbl.rotate === 1) + mesh.translateY(-dy); + if (lbl.rotate === 2) + mesh.translateX(-dx); mesh.applyMatrix4(m); xcont.add(mesh); }); @@ -80600,7 +80751,13 @@ function drawXYZ(toplevel, AxisPainter, opts) { xcont.add(createZoomMesh('x', this.size_x3d)); top.add(xcont); - lbls = []; text_scale = 1; maxtextheight = 0; ticks = []; + lbls = []; + text_scale = 1; + maxtextwidth = maxtextheight = 0; + ticks = []; + + const center_y = this.y_handle.isCenteredLabels(), + rotate_y = this.y_handle.isRotateLabels(); while (yticks.next()) { const gry = yticks.grpos; @@ -80613,16 +80770,17 @@ function drawXYZ(toplevel, AxisPainter, opts) { is_major = false; lbl = ''; } - if (is_major && lbl && opts.draw) { + if (is_major && lbl && opts.draw && (!center_y || !yticks.last_major())) { const mod = yticks.get_modifier(); if (mod?.fLabText) lbl = mod.fLabText; const text3d = createLatexGeometry(this, lbl, this.y_handle.labelsFont.size); text3d.computeBoundingBox(); const draw_width = text3d.boundingBox.max.x - text3d.boundingBox.min.x, - draw_height = text3d.boundingBox.max.y - text3d.boundingBox.min.y; + draw_height = text3d.boundingBox.max.y - text3d.boundingBox.min.y; text3d.center = true; + maxtextwidth = Math.max(maxtextwidth, draw_width); maxtextheight = Math.max(maxtextheight, draw_height); if (mod?.fTextColor) text3d.color = this.getColor(mod.fTextColor); @@ -80636,10 +80794,12 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (draw_width > 0) text_scale = Math.min(text_scale, 0.9*space/draw_width); } - if (this.y_handle.isCenteredLabels()) { + if (center_y) { if (!space) space = Math.min(gry - grminy, grmaxy - gry); text3d.gry += space/2; } + if (rotate_y) + text3d.rotate = 1; } ticks.push(0, 
gry, 0, this.y_handle.ticksSize*(is_major ? -1 : -0.6), gry, 0); } @@ -80652,6 +80812,8 @@ function drawXYZ(toplevel, AxisPainter, opts) { text3d.offsetx = 1.6 * this.y_handle.titleOffset + (grmaxx - grminx) * 0.005; text3d.gry = (grminy + grmaxy)/2; // default position for centered title text3d.kind = 'title'; + if (this.y_handle.isRotateTitle()) + text3d.rotate = 2; lbls.push(text3d); } @@ -80659,22 +80821,32 @@ function drawXYZ(toplevel, AxisPainter, opts) { let yticksline, ycont = new THREE.Object3D(); ycont.position.set(grminx, 0, grminz); ycont.rotation.y = -1/4*Math.PI; + ycont.painter = this.y_handle; if (opts.draw) { yticksline = createLineSegments(ticks, getLineMaterial(this.y_handle, 'ticks')); ycont.add(yticksline); } lbls.forEach(lbl => { - const w = lbl.boundingBox.max.x - lbl.boundingBox.min.x, - posy = lbl.center ? lbl.gry + w/2 : (lbl.opposite ? grminy + w : grmaxy), - m = new THREE.Matrix4(); - // matrix to swap y and z scales and shift along z to its position - m.set(0, text_scale, 0, -maxtextheight*text_scale - this.y_handle.ticksSize - lbl.offsetx, + const dx = lbl.boundingBox.max.x - lbl.boundingBox.min.x, + dy = lbl.boundingBox.max.y - lbl.boundingBox.min.y, + w = (lbl.rotate === 1) ? dy : dx, + posx = -text_scale * (lbl.rotate === 1 ? maxtextwidth : maxtextheight) - this.y_handle.ticksSize - lbl.offsetx, + posy = lbl.center ? lbl.gry + w/2 : (lbl.opposite ? grminy + w : grmaxy), + m = new THREE.Matrix4(); + m.set(0, text_scale, 0, posx, -text_scale, 0, 0, posy, 0, 0, 1, 0, 0, 0, 0, 1); const mesh = new THREE.Mesh(lbl, getTextMaterial(this.y_handle, lbl.kind, lbl.color)); + if (lbl.rotate) + mesh.rotateZ(lbl.rotate * Math.PI / 2); + if (lbl.rotate === 1) + mesh.translateY(-dy); + if (lbl.rotate === 2) + mesh.translateX(-dx); + mesh.applyMatrix4(m); ycont.add(mesh); }); @@ -80687,19 +80859,31 @@ function drawXYZ(toplevel, AxisPainter, opts) { ycont = new THREE.Object3D(); ycont.position.set(grmaxx, 0, grminz); ycont.rotation.y = -3/4*Math.PI; + ycont.painter = this.y_handle; if (opts.draw) ycont.add(new THREE.LineSegments(yticksline.geometry, yticksline.material)); lbls.forEach(lbl => { - const w = lbl.boundingBox.max.x - lbl.boundingBox.min.x, - posy = lbl.center ? lbl.gry - w/2 : (lbl.opposite ? grminy : grmaxy - w), - m = new THREE.Matrix4(); - m.set(0, text_scale, 0, -maxtextheight*text_scale - this.y_handle.ticksSize - lbl.offsetx, + const dx = lbl.boundingBox.max.x - lbl.boundingBox.min.x, + dy = lbl.boundingBox.max.y - lbl.boundingBox.min.y, + w = (lbl.rotate === 1) ? dy : dx, + posx = -text_scale * (lbl.rotate === 1 ? maxtextwidth : maxtextheight) - this.y_handle.ticksSize - lbl.offsetx, + posy = lbl.center ? lbl.gry - w/2 : (lbl.opposite ? 
grminy : grmaxy - w), + m = new THREE.Matrix4(); + + m.set(0, text_scale, 0, posx, text_scale, 0, 0, posy, 0, 0, -1, 0, 0, 0, 0, 1); const mesh = new THREE.Mesh(lbl, getTextMaterial(this.y_handle, lbl.kind, lbl.color)); + if (lbl.rotate) + mesh.rotateZ(lbl.rotate * Math.PI / 2); + if (lbl.rotate === 1) + mesh.translateY(-dy); + if (lbl.rotate === 2) + mesh.translateX(-dx); + mesh.applyMatrix4(m); ycont.add(mesh); }); @@ -80713,6 +80897,9 @@ function drawXYZ(toplevel, AxisPainter, opts) { let zgridx = null, zgridy = null, lastmajorz = null, maxzlblwidth = 0; + const center_z = this.z_handle.isCenteredLabels(), + rotate_z = this.z_handle.isRotateLabels(); + if (this.size_z3d && opts.drawany) { zgridx = []; zgridy = []; } @@ -80724,14 +80911,14 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (lbl === null) { is_major = false; lbl = ''; } - if (is_major && lbl && opts.draw) { + if (is_major && lbl && opts.draw && (!center_z || !zticks.last_major())) { const mod = zticks.get_modifier(); if (mod?.fLabText) lbl = mod.fLabText; const text3d = createLatexGeometry(this, lbl, this.z_handle.labelsFont.size); text3d.computeBoundingBox(); const draw_width = text3d.boundingBox.max.x - text3d.boundingBox.min.x, - draw_height = text3d.boundingBox.max.y - text3d.boundingBox.min.y; + draw_height = text3d.boundingBox.max.y - text3d.boundingBox.min.y; text3d.translate(-draw_width, -draw_height/2, 0); if (mod?.fTextColor) text3d.color = this.getColor(mod.fTextColor); @@ -80793,10 +80980,12 @@ function drawXYZ(toplevel, AxisPainter, opts) { zcont.push(new THREE.Object3D()); lbls.forEach((lbl, indx) => { - const m = new THREE.Matrix4(); + const m = new THREE.Matrix4(), + dx = lbl.boundingBox.max.x - lbl.boundingBox.min.x; + let grz = lbl.grz; - if (this.z_handle.isCenteredLabels()) { + if (center_z) { if (indx < lbls.length - 1) grz = (grz + lbls[indx+1].grz) / 2; else if (indx > 0) @@ -80808,6 +80997,8 @@ function drawXYZ(toplevel, AxisPainter, opts) { 0, 0, 1, 0, 0, text_scale, 0, grz); const mesh = new THREE.Mesh(lbl, getTextMaterial(this.z_handle)); + if (rotate_z) + mesh.rotateZ(-Math.PI/2).translateX(dx/2); mesh.applyMatrix4(m); zcont[n].add(mesh); }); @@ -80815,16 +81006,19 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (this.z_handle.fTitle && opts.draw) { const text3d = createLatexGeometry(this, this.z_handle.fTitle, this.z_handle.titleFont.size); text3d.computeBoundingBox(); - const draw_width = text3d.boundingBox.max.x - text3d.boundingBox.min.x, - posz = this.z_handle.titleCenter ? (grmaxz + grminz - draw_width)/2 : (this.z_handle.titleOpposite ? grminz : grmaxz - draw_width); + const dx = text3d.boundingBox.max.x - text3d.boundingBox.min.x, + dy = text3d.boundingBox.max.y - text3d.boundingBox.min.y, + rotate = this.z_handle.isRotateTitle(), + posz = this.z_handle.titleCenter ? (grmaxz + grminz - dx)/2 : (this.z_handle.titleOpposite ? grminz : grmaxz - dx) + (rotate ? dx : 0), + m = new THREE.Matrix4(); - text3d.rotateZ(Math.PI/2); - - const m = new THREE.Matrix4(); m.set(-text_scale, 0, 0, this.z_handle.ticksSize + (grmaxx - grminx) * 0.005 + maxzlblwidth + this.z_handle.titleOffset, 0, 0, 1, 0, 0, text_scale, 0, posz); const mesh = new THREE.Mesh(text3d, getTextMaterial(this.z_handle, 'title')); + mesh.rotateZ(Math.PI*(rotate ? 
1.5 : 0.5)); + if (rotate) mesh.translateY(-dy); + mesh.applyMatrix4(m); zcont[n].add(mesh); } @@ -80837,6 +81031,7 @@ function drawXYZ(toplevel, AxisPainter, opts) { zcont[n].zid = n + 2; top.add(zcont[n]); + zcont[n].painter = this.z_handle; } zcont[0].position.set(grminx, grmaxy, 0); @@ -83136,8 +83331,10 @@ class TH1Painter extends TH1Painter$2 { pr = main.create3DScene(this.options.Render3D, this.options.x3dscale, this.options.y3dscale, this.options.Ortho).then(() => { main.setAxesRanges(histo.fXaxis, this.xmin, this.xmax, histo.fYaxis, this.ymin, this.ymax, histo.fZaxis, 0, 0, this); main.set3DOptions(this.options); - main.drawXYZ(main.toplevel, TAxisPainter, { use_y_for_z: true, zmult, zoom: settings.Zooming, ndim: 1, - draw: (this.options.Axis !== -1), drawany: this.options.isCartesian() }); + main.drawXYZ(main.toplevel, TAxisPainter, { + ndim: 1, hist_painter: this, use_y_for_z: true, zmult, zoom: settings.Zooming, + draw: (this.options.Axis !== -1), drawany: this.options.isCartesian() + }); }); } @@ -83330,7 +83527,7 @@ function drawTH2PolyLego(painter) { geometry.setAttribute('position', new THREE.BufferAttribute(pos, 3)); geometry.computeVertexNormals(); - const material = new THREE.MeshBasicMaterial(getMaterialArgs(painter._color_palette?.getColor(colindx), { vertexColors: false })), + const material = new THREE.MeshBasicMaterial(getMaterialArgs(painter._color_palette?.getColor(colindx), { vertexColors: false, side: THREE.DoubleSide })), mesh = new THREE.Mesh(geometry, material); pmain.add3DMesh(mesh); @@ -83406,9 +83603,11 @@ class TH2Painter extends TH2Painter$2 { pr = main.create3DScene(this.options.Render3D, this.options.x3dscale, this.options.y3dscale, this.options.Ortho).then(() => { main.setAxesRanges(histo.fXaxis, this.xmin, this.xmax, histo.fYaxis, this.ymin, this.ymax, histo.fZaxis, this.zmin, this.zmax, this); main.set3DOptions(this.options); - main.drawXYZ(main.toplevel, TAxisPainter, { zmult, zoom: settings.Zooming, ndim: 2, + main.drawXYZ(main.toplevel, TAxisPainter, { + ndim: 2, hist_painter: this, zmult, zoom: settings.Zooming, draw: this.options.Axis !== -1, drawany: this.options.isCartesian(), - reverse_x: this.options.RevX, reverse_y: this.options.RevY }); + reverse_x: this.options.RevX, reverse_y: this.options.RevY + }); }); } @@ -83820,7 +84019,7 @@ class TH3Painter extends THistPainter { if (!this.draw_content) return false; - let box_option = this.options.Box ? this.options.BoxStyle : 0; + let box_option = this.options.BoxStyle; if (!box_option && this.options.Scat) { const promise = this.draw3DScatter(); @@ -83845,7 +84044,8 @@ class TH3Painter extends THistPainter { if ((this.options.GLBox === 11) || (this.options.GLBox === 12)) { tipscale = 0.4; use_lambert = true; - if (this.options.GLBox === 12) use_colors = true; + if (this.options.GLBox === 12) + use_colors = true; single_bin_geom = new THREE.SphereGeometry(0.5, main.webgl ? 16 : 8, main.webgl ? 
12 : 6); single_bin_geom.applyMatrix4(new THREE.Matrix4().makeRotationX(Math.PI/2)); @@ -83873,7 +84073,7 @@ class TH3Painter extends THistPainter { if (box_option === 12) use_colors = true; - else if (box_option === 13) { + else if (box_option === 13) { use_colors = true; use_helper = false; } else if (this.options.GLColor) { @@ -84077,8 +84277,10 @@ class TH3Painter extends THistPainter { pr = main.create3DScene(this.options.Render3D, this.options.x3dscale, this.options.y3dscale, this.options.Ortho).then(() => { main.setAxesRanges(histo.fXaxis, this.xmin, this.xmax, histo.fYaxis, this.ymin, this.ymax, histo.fZaxis, this.zmin, this.zmax, this); main.set3DOptions(this.options); - main.drawXYZ(main.toplevel, TAxisPainter, { zoom: settings.Zooming, ndim: 3, - draw: this.options.Axis !== -1, drawany: this.options.isCartesian() }); + main.drawXYZ(main.toplevel, TAxisPainter, { + ndim: 3, hist_painter: this, zoom: settings.Zooming, + draw: this.options.Axis !== -1, drawany: this.options.isCartesian() + }); return this.draw3DBins(); }).then(() => { main.render3D(); @@ -84088,7 +84290,7 @@ class TH3Painter extends THistPainter { } if (this.isMainPainter()) - pr = pr.then(() => this.drawColorPalette(this.options.Zscale && (this._box_option === 12 || this._box_option === 13))); + pr = pr.then(() => this.drawColorPalette(this.options.Zscale && (this._box_option === 12 || this._box_option === 13 || this.options.GLBox === 12))); return pr.then(() => this.updateFunctions()) .then(() => this.updateHistTitle()) @@ -84103,6 +84305,7 @@ class TH3Painter extends THistPainter { pp.addPadButton('auto_zoom', 'Unzoom all axes', 'ToggleZoom', 'Ctrl *'); if (this.draw_content) pp.addPadButton('statbox', 'Toggle stat box', 'ToggleStatBox'); + pp.addPadButton('th2colorz', 'Toggle color palette', 'ToggleColorZ'); pp.showPadButtons(); } @@ -85030,16 +85233,16 @@ function createNormal(axis_name, pos, size) { return new Geometry(node); } -const cfg$1 = { +const cfg = { GradPerSegm: 6, // grad per segment in cylinder/spherical symmetry shapes CompressComp: true // use faces compression in composite shapes }; function geoCfg(name, value) { if (value === undefined) - return cfg$1[name]; + return cfg[name]; - cfg$1[name] = value; + cfg[name] = value; } const kindGeo = 0, // TGeoNode / TGeoShape @@ -85926,7 +86129,7 @@ function createTubeBuffer(shape, faces_limit) { thetaLength = shape.fPhi2 - shape.fPhi1; } - const radiusSegments = Math.max(4, Math.round(thetaLength/cfg$1.GradPerSegm)); + const radiusSegments = Math.max(4, Math.round(thetaLength/cfg.GradPerSegm)); // external surface let numfaces = radiusSegments * (((outerR[0] <= 0) || (outerR[1] <= 0)) ? 1 : 2); @@ -86048,7 +86251,7 @@ function createTubeBuffer(shape, faces_limit) { /** @summary Creates eltu geometry * @private */ function createEltuBuffer(shape, faces_limit) { - const radiusSegments = Math.max(4, Math.round(360/cfg$1.GradPerSegm)); + const radiusSegments = Math.max(4, Math.round(360/cfg.GradPerSegm)); if (faces_limit < 0) return radiusSegments*4; @@ -86099,8 +86302,8 @@ function createEltuBuffer(shape, faces_limit) { * @private */ function createTorusBuffer(shape, faces_limit) { const radius = shape.fR; - let radialSegments = Math.max(6, Math.round(360/cfg$1.GradPerSegm)), - tubularSegments = Math.max(8, Math.round(shape.fDphi/cfg$1.GradPerSegm)), + let radialSegments = Math.max(6, Math.round(360/cfg.GradPerSegm)), + tubularSegments = Math.max(8, Math.round(shape.fDphi/cfg.GradPerSegm)), numfaces = (shape.fRmin > 0 ? 
4 : 2) * radialSegments * (tubularSegments + (shape.fDphi !== 360 ? 1 : 0)); if (faces_limit < 0) return numfaces; @@ -86198,7 +86401,7 @@ function createPolygonBuffer(shape, faces_limit) { radiusSegments = shape.fNedges; factor = 1.0 / Math.cos(Math.PI/180 * thetaLength / radiusSegments / 2); } else { - radiusSegments = Math.max(5, Math.round(thetaLength/cfg$1.GradPerSegm)); + radiusSegments = Math.max(5, Math.round(thetaLength/cfg.GradPerSegm)); factor = 1; } @@ -86425,7 +86628,7 @@ function createXtruBuffer(shape, faces_limit) { /** @summary Creates para geometry * @private */ function createParaboloidBuffer(shape, faces_limit) { - let radiusSegments = Math.max(4, Math.round(360/cfg$1.GradPerSegm)), + let radiusSegments = Math.max(4, Math.round(360/cfg.GradPerSegm)), heightSegments = 30; if (faces_limit > 0) { @@ -86520,7 +86723,7 @@ function createHypeBuffer(shape, faces_limit) { if ((shape.fTin === 0) && (shape.fTout === 0)) return createTubeBuffer(shape, faces_limit); - let radiusSegments = Math.max(4, Math.round(360/cfg$1.GradPerSegm)), + let radiusSegments = Math.max(4, Math.round(360/cfg.GradPerSegm)), heightSegments = 30, numfaces = radiusSegments * (heightSegments + 1) * ((shape.fRmin > 0) ? 4 : 2); @@ -86893,7 +87096,7 @@ function createComposite(shape, faces_limit) { return geom1; } - let bsp1 = new Geometry(geom1, matrix1, cfg$1.CompressComp ? 0 : undefined); + let bsp1 = new Geometry(geom1, matrix1, cfg.CompressComp ? 0 : undefined); const bsp2 = new Geometry(geom2, matrix2, bsp1.maxid); @@ -93182,7 +93385,7 @@ class TGeoPainter extends ObjectPainter { ensureBloom(on) { if (on === undefined) { if (this.ctrl.highlight_bloom === 0) - this.ctrl.highlight_bloom = this._webgl && ((typeof navigator === 'undefined') || !/android/i.test(navigator.userAgent)); + this.ctrl.highlight_bloom = this._webgl && !browser.android; on = this.ctrl.highlight_bloom && this.ctrl.getMaterialCfg()?.emissive; } @@ -94223,7 +94426,7 @@ class TGeoPainter extends ObjectPainter { this._webgl = (r.jsroot_render3d === constants$1.Render3D.WebGL); - if (isFunc(r.setPixelRatio) && !isNodeJs()) + if (isFunc(r.setPixelRatio) && !isNodeJs() && !browser.android) r.setPixelRatio(window.devicePixelRatio); r.setSize(w, h, !this._fit_main_area); r.localClippingEnabled = true; @@ -141913,18 +142116,20 @@ async function makePDF(svg, args) { let doc; + const orientation = (svg.width < svg.height) ? 'portrait' : 'landscape'; + if (args?.as_doc) doc = args?.doc; if (doc) { doc.addPage({ - orientation: 'landscape', + orientation, unit: 'px', format: [svg.width + 10, svg.height + 10] }); } else { doc = new jsPDF({ - orientation: 'landscape', + orientation, unit: 'px', format: [svg.width + 10, svg.height + 10] }); @@ -141969,7 +142174,13 @@ async function makePDF(svg, args) { node.removeAttribute('dy'); }); - restore_text.forEach(node => { node.innerHTML = node.$originalHTML; node.setAttribute('font-family', node.$originalFont); }); + restore_text.forEach(node => { + node.innerHTML = node.$originalHTML; + if (node.$originalFont) + node.setAttribute('font-family', node.$originalFont); + else + node.removeAttribute('font-family'); + }); const res = args?.as_buffer ? 
doc.output('arraybuffer') : doc.output('dataurlstring'); if (nodejs) { @@ -142051,6 +142262,7 @@ drawFuncs = { lst: [ { name: clTCutG, sameas: clTGraph }, { name: /^RooHist/, sameas: clTGraph }, { name: /^RooCurve/, sameas: clTGraph }, + { name: /^RooEllipse/, sameas: clTGraph }, { name: 'TScatter', icon: 'img_graph', class: () => Promise.resolve().then(function () { return TScatterPainter$1; }).then(h => h.TScatterPainter), opt: ';A' }, { name: 'RooPlot', icon: 'img_canvas', func: drawRooPlot }, { name: 'TRatioPlot', icon: 'img_mgraph', class: () => Promise.resolve().then(function () { return TRatioPlotPainter$1; }).then(h => h.TRatioPlotPainter), opt: '' }, @@ -147127,6 +147339,13 @@ async function drawText$1() { assignContextMenu(this, kToFront); + this.fillContextMenuItems = function(menu) { + menu.add('Change text', () => menu.input('Enter new text', text.fTitle).then(t => { + text.fTitle = t; + this.interactiveRedraw('pad', `exec:SetTitle("${t}")`); + })); + }; + return this; }); } @@ -148411,7 +148630,7 @@ let TGraphPainter$1 = class TGraphPainter extends ObjectPainter { rect = { x1: -5, x2: 5, y1: -5, y2: 5 }; const matchx = (pnt.x >= d.grx1 + rect.x1) && (pnt.x <= d.grx1 + rect.x2), - matchy = (pnt.y >= d.gry1 + rect.y1) && (pnt.y <= d.gry1 + rect.y2); + matchy = (pnt.y >= d.gry1 + rect.y1) && (pnt.y <= d.gry1 + rect.y2); if (matchx && (matchy || (pnt.nproc > 1))) { best_dist2 = dist2; @@ -152211,7 +152430,7 @@ class TGraphPolarPainter extends ObjectPainter { for (let n = 0; n < graph.fNpoints; ++n) { const pos = main.translate(graph.fX[n], graph.fY[n]), - dist2 = (pos.x-pnt.x)**2 + (pos.y-pnt.y)**2; + dist2 = (pos.grx-pnt.x)**2 + (pos.gry-pnt.y)**2; if (dist2 < best_dist2) { best_dist2 = dist2; bestindx = n; bestpos = pos; } } @@ -152222,8 +152441,8 @@ class TGraphPolarPainter extends ObjectPainter { const res = { name: this.getObject().fName, title: this.getObject().fTitle, - x: bestpos.x, y: bestpos.y, - color1: this.markeratt?.used ? this.markeratt.color : this.lineatt.color, + x: bestpos.grx, y: bestpos.gry, + color1: (this.markeratt?.used ? this.markeratt.color : undefined) ?? (this.fillatt?.used ? this.fillatt.color : undefined) ?? 
this.lineatt?.color, exact: Math.sqrt(best_dist2) < 4, lines: [this.getObjectHint()], binindx: bestindx, @@ -155239,8 +155458,8 @@ class TGaxisPainter extends TAxisPainter { }); } - /** @summary Fill TGaxis context */ - fillContextMenu(menu) { + /** @summary Fill TGaxis context menu items */ + fillContextMenuItems(menu) { menu.addTAxisMenu(EAxisBits, this, this.getObject(), ''); } @@ -156602,6 +156821,9 @@ class RAxisPainter extends RObjectPainter { return this.v7EvalAttr('labels_center', false); } + /** @summary Is labels should be rotated */ + isRotateLabels() { return false; } + /** @summary Used to move axis labels instead of zooming * @private */ processLabelsMove(arg, pos) { @@ -157228,7 +157450,7 @@ class RAxisPainter extends RObjectPainter { evnt.stopPropagation(); // disable main context menu evnt.preventDefault(); // disable browser context menu createMenu(evnt, this).then(menu => { - menu.header('RAxisDrawable'); + menu.header('RAxisDrawable', `${urlClassPrefix}ROOT_1_1Experimental_1_1RAxisBase.html`); menu.add('Unzoom', () => this.zoomStandalone()); this.fillAxisContextMenu(menu, ''); menu.show(); @@ -158009,9 +158231,14 @@ class RFramePainter extends RObjectPainter { this._frame_height = h; this._frame_rotate = rotate; this._frame_fixpos = fixpos; + this._frame_trans = trans; - if (this.mode3d) return this; // no need for real draw in mode3d + return this.mode3d ? this : this.createFrameG(); + } + /** @summary Create frame element and update all attributes + * @private */ + createFrameG() { // this is svg:g object - container for every other items belonging to frame this.draw_g = this.getFrameSvg(); @@ -158040,20 +158267,20 @@ class RFramePainter extends RObjectPainter { this.axes_drawn = false; - this.draw_g.attr('transform', trans); + this.draw_g.attr('transform', this._frame_trans); top_rect.attr('x', 0) .attr('y', 0) - .attr('width', w) - .attr('height', h) + .attr('width', this._frame_width) + .attr('height', this._frame_height) .attr('rx', this.lineatt.rx || null) .attr('ry', this.lineatt.ry || null) .call(this.fillatt.func) .call(this.lineatt.func); - main_svg.attr('width', w) - .attr('height', h) - .attr('viewBox', `0 0 ${w} ${h}`); + main_svg.attr('width', this._frame_width) + .attr('height', this._frame_height) + .attr('viewBox', `0 0 ${this._frame_width} ${this._frame_height}`); let pr = Promise.resolve(true); @@ -158403,7 +158630,7 @@ class RFramePainter extends RObjectPainter { const handle = this[kind+'_handle'], faxis = obj || this[kind+'axis']; if (!handle) return false; - menu.header(`${kind.toUpperCase()} axis`); + menu.header(`${kind.toUpperCase()} axis`, `${urlClassPrefix}ROOT_1_1Experimental_1_1RAxisBase.html`); if (isFunc(faxis?.TestBit)) { const main = this.getMainPainter(true); @@ -158417,7 +158644,7 @@ class RFramePainter extends RObjectPainter { const alone = menu.size() === 0; if (alone) - menu.header('Frame'); + menu.header('Frame', `${urlClassPrefix}ROOT_1_1Experimental_1_1RFrame.html`); else menu.separator(); @@ -158832,16 +159059,16 @@ class RPadPainter extends RObjectPainter { /** @summary Generate pad events, normally handled by GED * @desc in pad painter, while pad may be drawn without canvas * @private */ - producePadEvent(what, padpainter, painter, position, place) { + producePadEvent(what, padpainter, painter, position) { if ((what === 'select') && isFunc(this.selectActivePad)) this.selectActivePad(padpainter, painter, position); - if (this.pad_events_receiver) - this.pad_events_receiver({ what, padpainter, painter, position, place }); + if 
(isFunc(this.pad_events_receiver)) + this.pad_events_receiver({ what, padpainter, painter, position }); } /** @summary method redirect call to pad events receiver */ - selectObjectPainter(painter, pos, place) { + selectObjectPainter(painter, pos) { const istoppad = (this.iscan || !this.has_canvas), canp = istoppad ? this : this.getCanvPainter(); @@ -158852,7 +159079,7 @@ class RPadPainter extends RObjectPainter { selectActivePad({ pp: this, active: true }); - canp.producePadEvent('select', this, painter, pos, place); + canp.producePadEvent('select', this, painter, pos); } /** @summary Set fast drawing property depending on the size @@ -158952,9 +159179,19 @@ class RPadPainter extends RObjectPainter { this.createAttFill({ pattern: 1001, color: 0 }); if ((rect.width <= lmt) || (rect.height <= lmt)) { - svg.style('display', 'none'); - console.warn(`Hide canvas while geometry too small w=${rect.width} h=${rect.height}`); - rect.width = 200; rect.height = 100; // just to complete drawing + if (this.snapid === undefined) { + svg.style('display', 'none'); + console.warn(`Hide canvas while geometry too small w=${rect.width} h=${rect.height}`); + } + if (this._pad_width && this._pad_height) { + // use last valid dimensions + rect.width = this._pad_width; + rect.height = this._pad_height; + } else { + // just to complete drawing. + rect.width = 800; + rect.height = 600; + } } else svg.style('display', null); @@ -159265,7 +159502,9 @@ class RPadPainter extends RObjectPainter { /** @summary Fill pad context menu * @private */ fillContextMenu(menu) { - menu.header(this.iscan ? 'RCanvas' : 'RPad'); + const clname = this.iscan ? 'RCanvas' : 'RPad'; + + menu.header(clname, `${urlClassPrefix}ROOT_1_1Experimental_1_1${clname}.html`); menu.addchk(this.isTooltipAllowed(), 'Show tooltips', () => this.setTooltipAllowed('toggle')); @@ -159697,7 +159936,7 @@ class RPadPainter extends RObjectPainter { const mainid = this.selectDom().attr('id'); - if (!this.isBatchMode() && !this.use_openui && !this.brlayout && mainid && isStr(mainid)) { + if (!this.isBatchMode() && this.online_canvas && !this.use_openui && !this.brlayout && mainid && isStr(mainid) && !getHPainter()) { this.brlayout = new BrowserLayout(mainid, null, this); this.brlayout.create(mainid, true); this.setDom(this.brlayout.drawing_divid()); // need to create canvas @@ -159949,7 +160188,7 @@ class RPadPainter extends RObjectPainter { const arg = (file_format === 'pdf') ? 
{ node: elem.node(), width, height, reset_tranform: use_frame } - : compressSVG(`${elem.node().innerHTML}`); + : compressSVG(`${elem.node().innerHTML}`); return svgToImage(arg, file_format, args).then(res => { for (let k = 0; k < items.length; ++k) { @@ -160390,6 +160629,10 @@ class RCanvasPainter extends RPadPainter { this.sendWebsocket('PRODUCE:' + fname); } + /** @summary Return true if message can be send via web socket + * @private */ + canSendWebSocket() { return this._websocket?.canSend(); } + /** @summary Send message via web socket * @private */ sendWebsocket(msg) { @@ -162830,7 +163073,8 @@ class RHistPainter extends RObjectPainter { } // find min/max values in selected range - this.maxbin = this.minbin = this.minposbin = null; + let is_first = true; + this.minposbin = 0; for (i = res.i1; i < res.i2; i += res.stepi) { for (j = res.j1; j < res.j2; j += res.stepj) { @@ -162844,17 +163088,21 @@ class RHistPainter extends RObjectPainter { if ((binz > 0) && ((binz < res.min) || (res.min === 0))) res.min = binz; binz = binz/binarea; } - if (this.maxbin === null) + if (is_first) { this.maxbin = this.minbin = binz; - else { + is_first = false; + } else { this.maxbin = Math.max(this.maxbin, binz); this.minbin = Math.min(this.minbin, binz); } - if (binz > 0) - if ((this.minposbin === null) || (binz < this.minposbin)) this.minposbin = binz; + if ((binz > 0) && ((this.minposbin === 0) || (binz < this.minposbin))) + this.minposbin = binz; } } + if (is_first) + this.maxbin = this.minbin = 0; + res.palette = pmain.getHistPalette(); if (res.palette) @@ -166033,6 +166281,7 @@ exports.svgToImage = svgToImage; exports.toJSON = toJSON; exports.treeDraw = treeDraw; exports.treeProcess = treeProcess; +exports.urlClassPrefix = urlClassPrefix; exports.version = version; exports.version_date = version_date; exports.version_id = version_id; diff --git a/js/changes.md b/js/changes.md index d428407fecf03..3be2ad37bbd5c 100644 --- a/js/changes.md +++ b/js/changes.md @@ -1,47 +1,67 @@ # JSROOT changelog -## Changes in dev +## Changes in 7.8.x +1. Fix - hidden canvas in Jupyter Lab, https://root-forum.cern.ch/t/63097/ + + +## Changes in 7.8.1 +1. Fix - correctly position title according to gStyle->GetTitleAlign() +2. Fix - tooltips on TGraphPolar +3. Fix - use 'portrait' orientation for PDF pages where width smaller than height +4. Fix - font corruption after PDF generation +5. Fix - support drawing of `RooEllipse` class + + +## Changes in 7.8.0 1. Let use custom time zone for time display, support '&utc' and '&cet' in URL parameters 2. Support gStyle.fLegendFillStyle 3. Let change histogram min/max values via context menu -4. Support Z-scale zooming with TScatter +4. Support Z-scale zooming with `TScatter` 5. Implement "haxis" draw option for histogram to draw only axes for hbar 6. Implement "axisg" and "haxisg" to draw axes with grids -7. Support TH1 marker, text and line drawing superimposed with "haxis" +7. Support `TH1` marker, text and line drawing superimposed with "haxis" 8. Support `TBox`, `TLatex`, `TLine`, `TMarker` drawing on "frame", support drawing on swapped axes 9. `TProfile` and `TProfile2D` projections https://github.com/root-project/root/issues/15851 -10. Draw total histogram from TEfficiency when draw option starts with 'b' -11. Let redraw TEfficiency, THStack and TMultiGraph with different draw options via hist context menu -12. Support 'pads' draw options for TMultiGraph, support context menu for it -13. Let drop object on sub-pads +10. 
Draw total histogram from `TEfficiency` when draw option starts with 'b' +11. Let redraw `TEfficiency`, `THStack` and `TMultiGraph` with different draw options via hist context menu +12. Support 'pads' draw options for `TMultiGraph`, support context menu for it +13. Let drop objects on sub-pads 14. Properly loads ES6 modules for web canvas -15. Improve performance of TH3/RH3 drawing by using THREE.InstancedMesh +15. Improve performance of `TH3`/`RH3` drawing by using `THREE.InstancedMesh` 16. Implement batch mode with '&batch' URL parameter to create SVG/PNG images with default GUI 17. Adjust node.js implementation to produce identical output with normal browser 18. Create necessary infrastructure for testing with 'puppeteer' -19. Support inject of ES6 modules via '&inject=path.mjs' +19. Support injection of ES6 modules via '&inject=path.mjs' 20. Using importmap for 'jsroot' in all major HTML files and in demos 21. Implement `settings.CutAxisLabels` flag to remove labels which may exceed graphical range -22. Let disable usage of TAxis custom labels via context menu -23. Let configure default draw options via context menu, they can be preserved in the local storage +22. Let disable usage of `TAxis` custom labels via context menu +23. Let configure default draw options via context menu, preserved in the local storage 24. Let save canvas as JSON file from context menu, object as JSON from inspector 25. Upgrade three.js r162 -> r168, use r162 only in node.js because of "gl" module 26. Create unified svg2pdf/jspdf ES6 modules, integrate in jsroot builds -27. Let create multipage PDF document - in TWebCanvas batch mode -28. Let add external links via `#url[link]{label}` syntax - including jsPDF support -29. Support TAttMarker style with line width bigger than 1 -30. Internals - upgrade to eslint 9 -31. Internals - do not select pad (aka gPad) for objects drawing, always use assigned pad painter -32. Fix - properly save zoomed ranges in drawingJSON() -33. Fix - properly redraw TMultiGraph -34. Fix - show empty bin in TProfile2D if it has entries #316 -35. Fix - unzooming on log scale was extending range forevever -36. Fix - do not force style 8 for hist markers -37. Fix - ensure minimal hist title height -38. Fix - disable Bloom effects on Android TGeo displays -39. Fix - handle reordering of fragments in multipart reply #319 -40. Fix - properly show non-zero entries #320 -41. Fix - display empty hist bin if fSumw2 not zero +27. Let create multi-page PDF document - in `TWebCanvas` batch mode +28. Let add external links in latex via `#url[link]{label}` syntax - including jsPDF support +29. Support `TAttMarker` style with line width bigger than 1 +30. Provide link to ROOT class documentation from context menus +31. Implement axis labels and title rotations on lego plots +32. Internals - upgrade to eslint 9 +33. Internals - do not select pad (aka gPad) for objects drawing, always use assigned pad painter +34. Fix - properly save zoomed ranges in drawingJSON() +35. Fix - properly redraw `TMultiGraph` +36. Fix - show empty bin in `TProfile2D` if it has entries #316 +37. Fix - unzooming on log scale was extending range forever +38. Fix - display empty hist bin if fSumw2 not zero +39. Fix - geometry display on Android devices + + +## Changes in 7.7.5 +1. Fix - exponent can be enabled only for log10 axis scale +2. Fix - properly set custom font size in latex +3. Fix - do not force style 8 for hist markers +4. Fix - ensure minimal hist title height +5. Fix - disable Bloom effect on Android +6. 
Fix - handle reordering of fragments in multipart reply #319 +7. Fix - properly show non-zero entries #320 ## Changes in 7.7.4 diff --git a/js/modules/base/FontHandler.mjs b/js/modules/base/FontHandler.mjs index 496775efc8728..beadcc2bfe560 100644 --- a/js/modules/base/FontHandler.mjs +++ b/js/modules/base/FontHandler.mjs @@ -36,7 +36,7 @@ async function loadFontFile(fname) { if (entry?.promises !== undefined) { return new Promise(resolveFunc => { - cfg.promises.push(resolveFunc); + entry.promises.push(resolveFunc); }); } @@ -100,6 +100,7 @@ class FontHandler { this.size = Math.round(size || 11); this.scale = scale; + this.index = 0; this.func = this.setFont.bind(this); @@ -107,8 +108,11 @@ class FontHandler { if (fontIndex && isObject(fontIndex)) cfg = fontIndex; - else - cfg = root_fonts[(fontIndex && Number.isInteger(fontIndex)) ? Math.floor(fontIndex / 10) : 0]; + else { + if (fontIndex && Number.isInteger(fontIndex)) + this.index = Math.floor(fontIndex / 10); + cfg = root_fonts[this.index]; + } if (cfg) { this.cfg = cfg; diff --git a/js/modules/base/ObjectPainter.mjs b/js/modules/base/ObjectPainter.mjs index 55f9e6d1cfacf..e4efb873bfbe0 100644 --- a/js/modules/base/ObjectPainter.mjs +++ b/js/modules/base/ObjectPainter.mjs @@ -1,6 +1,6 @@ import { select as d3_select, pointer as d3_pointer } from '../d3.mjs'; import { settings, constants, internals, isNodeJs, isBatchMode, getPromise, BIT, - prROOT, clTObjString, clTAxis, isObject, isFunc, isStr, getDocument } from '../core.mjs'; + prROOT, clTObjString, clTAxis, isObject, isFunc, isStr, getDocument, urlClassPrefix } from '../core.mjs'; import { isPlainText, producePlainText, produceLatex, produceMathjax, typesetMathjax, approximateLabelWidth } from './latex.mjs'; import { getElementRect, BasePainter, makeTranslate } from './BasePainter.mjs'; import { TAttMarkerHandler } from './TAttMarkerHandler.mjs'; @@ -891,9 +891,10 @@ class ObjectPainter extends BasePainter { let cl = this.getClassName(); const p = cl.lastIndexOf('::'); if (p > 0) cl = cl.slice(p+2); - const title = (cl && name) ? `${cl}:${name}` : (cl || name || 'object'); + const hdr = (cl && name) ? `${cl}:${name}` : (cl || name || 'object'), + url = (p < 0) ? `${urlClassPrefix}${cl}.html` : ''; - menu.header(title); + menu.header(hdr, url); const size0 = menu.size(); diff --git a/js/modules/base/base3d.mjs b/js/modules/base/base3d.mjs index dd3080df587d7..dc711a8d9906a 100644 --- a/js/modules/base/base3d.mjs +++ b/js/modules/base/base3d.mjs @@ -185,7 +185,7 @@ function createSVGRenderer(as_is, precision, doc) { _textSizeAttr = `viewBox="${wrap.svg_attr.viewBox}" width="${wrap.svg_attr.width}" height="${wrap.svg_attr.height}"`, _textClearAttr = wrap.svg_style.backgroundColor ? 
` style="background:${wrap.svg_style.backgroundColor}"` : ''; - return `${wrap.accPath}`; + return `${wrap.accPath}`; }; rndr.fillTargetSVG = function(svg) { @@ -1245,6 +1245,21 @@ function createOrbitControl(painter, camera, scene, renderer, lookat) { const intersects = this.getMouseIntersects(mouse_pos); this.processSingleClick(intersects); } + + if (kind === 3) { + const intersects = this.getMouseIntersects(mouse_pos); + let objpainter = null; + for (let i = 0; !objpainter && (i < intersects.length); ++i) { + const obj3d = intersects[i].object; + objpainter = obj3d.painter || obj3d.parent?.painter; // check one top level + } + if (objpainter) { + // while axis painter not directly appears in the list of primitives, pad and canvas take from frame + const padp = this.painter?.getPadPainter(), + canvp = this.painter?.getCanvPainter(); + canvp?.producePadEvent('select', padp, objpainter); + } + } }; control.lstn_click = function(evnt) { @@ -1258,9 +1273,11 @@ function createOrbitControl(painter, camera, scene, renderer, lookat) { let kind = 0; if (isFunc(this.painter?.getFramePainter()?._click_handler)) - kind = 1; // user click handler + kind = 1; // user click handler else if (this.processSingleClick && this.painter?.options?.mouse_click) kind = 2; // eve7 click handler + else if (this.painter?.getCanvPainter()) + kind = 3; // select event for GED // if normal event, set longer timeout waiting if double click not detected if (kind) @@ -1563,7 +1580,7 @@ class PointsCreator { const handler = new TAttMarkerHandler({ style: args.style, color: args.color, size: 7 }), w = handler.fill ? 1 : 7, - imgdata = '' + + imgdata = `` + ``+ '', dataUrl = prSVG + (isNodeJs() ? imgdata : encodeURIComponent(imgdata)); diff --git a/js/modules/base/latex.mjs b/js/modules/base/latex.mjs index df8a9fc9edd6d..8a627fde45efe 100644 --- a/js/modules/base/latex.mjs +++ b/js/modules/base/latex.mjs @@ -522,7 +522,7 @@ function parseLatex(node, arg, label, curr) { }, createSubPos = fscale => { - return { lvl: curr.lvl + 1, x: 0, y: 0, fsize: curr.fsize*(fscale || 1), color: curr.color, font: curr.font, parent: curr, painter: curr.painter }; + return { lvl: curr.lvl + 1, x: 0, y: 0, fsize: curr.fsize*(fscale || 1), color: curr.color, font: curr.font, parent: curr, painter: curr.painter, italic: curr.italic, bold: curr.bold }; }; while (label) { @@ -869,11 +869,7 @@ function parseLatex(node, arg, label, curr) { const subpos = createSubPos(); - let value; - for (let c = curr; c && (value === undefined && c); c = c.parent) - value = c[found.bi]; - - subpos[found.bi] = !value; + subpos[found.bi] = !subpos[found.bi]; parseLatex(currG(), arg, sublabel, subpos); diff --git a/js/modules/base/makepdf.mjs b/js/modules/base/makepdf.mjs index c36426a073259..d7589ed60da29 100644 --- a/js/modules/base/makepdf.mjs +++ b/js/modules/base/makepdf.mjs @@ -85,18 +85,20 @@ async function makePDF(svg, args) { let doc; + const orientation = (svg.width < svg.height) ? 
'portrait' : 'landscape'; + if (args?.as_doc) doc = args?.doc; if (doc) { doc.addPage({ - orientation: 'landscape', + orientation, unit: 'px', format: [svg.width + 10, svg.height + 10] }); } else { doc = new jsPDF({ - orientation: 'landscape', + orientation, unit: 'px', format: [svg.width + 10, svg.height + 10] }); @@ -141,7 +143,13 @@ async function makePDF(svg, args) { node.removeAttribute('dy'); }); - restore_text.forEach(node => { node.innerHTML = node.$originalHTML; node.setAttribute('font-family', node.$originalFont); }); + restore_text.forEach(node => { + node.innerHTML = node.$originalHTML; + if (node.$originalFont) + node.setAttribute('font-family', node.$originalFont); + else + node.removeAttribute('font-family'); + }); const res = args?.as_buffer ? doc.output('arraybuffer') : doc.output('dataurlstring'); if (nodejs) { diff --git a/js/modules/core.mjs b/js/modules/core.mjs index 48baa9f222ff6..2d192a00f9fb4 100644 --- a/js/modules/core.mjs +++ b/js/modules/core.mjs @@ -1,10 +1,10 @@ /** @summary version id * @desc For the JSROOT release the string in format 'major.minor.patch' like '7.0.0' */ -const version_id = 'dev', +const version_id = '7.8.x', /** @summary version date * @desc Release date in format day/month/year like '14/04/2022' */ -version_date = '30/10/2024', +version_date = '10/03/2025', /** @summary version id and date * @desc Produced by concatenation of {@link version_id} and {@link version_date} @@ -101,6 +101,7 @@ if ((typeof document !== 'undefined') && (typeof window !== 'undefined') && (typ browser.chromeVersion = (browser.isChrome || browser.isChromeHeadless) ? parseInt(navigator.userAgent.match(/Chrom(?:e|ium)\/([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/)[1]) : 0; browser.isWin = navigator.userAgent.indexOf('Windows') >= 0; } + browser.android = /android/i.test(navigator.userAgent); browser.touches = ('ontouchend' in document); // identify if touch events are supported browser.screenWidth = window.screen?.width ?? 
1200; } @@ -287,8 +288,8 @@ settings = { * @desc When specified, extra URL parameter like ```?stamp=unique_value``` append to each files loaded * In such case browser will be forced to load file content disregards of server cache settings * Can be disabled by providing &usestamp=false in URL or via Settings/Files sub-menu - * @default true */ - UseStamp: true, + * Disabled by default on node.js, enabled in the web browsers */ + UseStamp: !nodejs, /** @summary Maximal number of bytes ranges in http 'Range' header * @desc Some http server has limitations for number of bytes ranges therefore let change maximal number via setting * @default 200 */ @@ -1086,7 +1087,8 @@ const prROOT = 'ROOT.', clTObject = 'TObject', clTNamed = 'TNamed', clTString = clTF1 = 'TF1', clTF2 = 'TF2', clTF3 = 'TF3', clTProfile = 'TProfile', clTProfile2D = 'TProfile2D', clTProfile3D = 'TProfile3D', clTGeoVolume = 'TGeoVolume', clTGeoNode = 'TGeoNode', clTGeoNodeMatrix = 'TGeoNodeMatrix', nsREX = 'ROOT::Experimental::', nsSVG = 'http://www.w3.org/2000/svg', - kNoZoom = -1111, kNoStats = BIT(9), kInspect = 'inspect', kTitle = 'title'; + kNoZoom = -1111, kNoStats = BIT(9), kInspect = 'inspect', kTitle = 'title', + urlClassPrefix = 'https://root.cern/doc/master/class'; /** @summary Create some ROOT classes @@ -1929,7 +1931,8 @@ export { version_id, version_date, version, source_dir, isNodeJs, isBatchMode, s clTProfile, clTProfile2D, clTProfile3D, clTHStack, clTGraph, clTGraph2DErrors, clTGraph2DAsymmErrors, clTGraphPolar, clTGraphPolargram, clTGraphTime, clTCutG, - clTPolyLine3D, clTPolyMarker3D, clTGeoVolume, clTGeoNode, clTGeoNodeMatrix, nsREX, nsSVG, kNoZoom, kNoStats, kInspect, kTitle, + clTPolyLine3D, clTPolyMarker3D, clTGeoVolume, clTGeoNode, clTGeoNodeMatrix, + nsREX, nsSVG, kNoZoom, kNoStats, kInspect, kTitle, urlClassPrefix, isArrayProto, getDocument, BIT, clone, addMethods, parse, parseMulti, toJSON, decodeUrl, findFunction, createHttpRequest, httpRequest, loadModules, loadScript, injectCode, create, createHistogram, setHistogramTitle, createTPolyLine, createTGraph, createTHStack, createTMultiGraph, diff --git a/js/modules/draw.mjs b/js/modules/draw.mjs index fd6bac6b9230d..39ed401c6fc27 100644 --- a/js/modules/draw.mjs +++ b/js/modules/draw.mjs @@ -84,6 +84,7 @@ drawFuncs = { lst: [ { name: clTCutG, sameas: clTGraph }, { name: /^RooHist/, sameas: clTGraph }, { name: /^RooCurve/, sameas: clTGraph }, + { name: /^RooEllipse/, sameas: clTGraph }, { name: 'TScatter', icon: 'img_graph', class: () => import('./hist2d/TScatterPainter.mjs').then(h => h.TScatterPainter), opt: ';A' }, { name: 'RooPlot', icon: 'img_canvas', func: drawRooPlot }, { name: 'TRatioPlot', icon: 'img_mgraph', class: () => import('./draw/TRatioPlotPainter.mjs').then(h => h.TRatioPlotPainter), opt: '' }, diff --git a/js/modules/draw/TGaxisPainter.mjs b/js/modules/draw/TGaxisPainter.mjs index 98b09b105d057..985585734934d 100644 --- a/js/modules/draw/TGaxisPainter.mjs +++ b/js/modules/draw/TGaxisPainter.mjs @@ -139,8 +139,8 @@ class TGaxisPainter extends TAxisPainter { }); } - /** @summary Fill TGaxis context */ - fillContextMenu(menu) { + /** @summary Fill TGaxis context menu items */ + fillContextMenuItems(menu) { menu.addTAxisMenu(EAxisBits, this, this.getObject(), ''); } diff --git a/js/modules/draw/TGraphPolarPainter.mjs b/js/modules/draw/TGraphPolarPainter.mjs index a932bc1129eb1..bafc69f476623 100644 --- a/js/modules/draw/TGraphPolarPainter.mjs +++ b/js/modules/draw/TGraphPolarPainter.mjs @@ -443,7 +443,7 @@ class TGraphPolarPainter extends 
ObjectPainter { for (let n = 0; n < graph.fNpoints; ++n) { const pos = main.translate(graph.fX[n], graph.fY[n]), - dist2 = (pos.x-pnt.x)**2 + (pos.y-pnt.y)**2; + dist2 = (pos.grx-pnt.x)**2 + (pos.gry-pnt.y)**2; if (dist2 < best_dist2) { best_dist2 = dist2; bestindx = n; bestpos = pos; } } @@ -454,8 +454,8 @@ class TGraphPolarPainter extends ObjectPainter { const res = { name: this.getObject().fName, title: this.getObject().fTitle, - x: bestpos.x, y: bestpos.y, - color1: this.markeratt?.used ? this.markeratt.color : this.lineatt.color, + x: bestpos.grx, y: bestpos.gry, + color1: (this.markeratt?.used ? this.markeratt.color : undefined) ?? (this.fillatt?.used ? this.fillatt.color : undefined) ?? this.lineatt?.color, exact: Math.sqrt(best_dist2) < 4, lines: [this.getObjectHint()], binindx: bestindx, diff --git a/js/modules/draw/more.mjs b/js/modules/draw/more.mjs index b321ab9aab193..4662534193a26 100644 --- a/js/modules/draw/more.mjs +++ b/js/modules/draw/more.mjs @@ -111,6 +111,13 @@ async function drawText() { assignContextMenu(this, kToFront); + this.fillContextMenuItems = function(menu) { + menu.add('Change text', () => menu.input('Enter new text', text.fTitle).then(t => { + text.fTitle = t; + this.interactiveRedraw('pad', `exec:SetTitle("${t}")`); + })); + }; + return this; }); } diff --git a/js/modules/geom/TGeoPainter.mjs b/js/modules/geom/TGeoPainter.mjs index 795e16fa6264c..2924c9379c8a6 100644 --- a/js/modules/geom/TGeoPainter.mjs +++ b/js/modules/geom/TGeoPainter.mjs @@ -1714,7 +1714,7 @@ class TGeoPainter extends ObjectPainter { ensureBloom(on) { if (on === undefined) { if (this.ctrl.highlight_bloom === 0) - this.ctrl.highlight_bloom = this._webgl && ((typeof navigator === 'undefined') || !/android/i.test(navigator.userAgent)); + this.ctrl.highlight_bloom = this._webgl && !browser.android; on = this.ctrl.highlight_bloom && this.ctrl.getMaterialCfg()?.emissive; } @@ -2755,7 +2755,7 @@ class TGeoPainter extends ObjectPainter { this._webgl = (r.jsroot_render3d === constants.Render3D.WebGL); - if (isFunc(r.setPixelRatio) && !isNodeJs()) + if (isFunc(r.setPixelRatio) && !isNodeJs() && !browser.android) r.setPixelRatio(window.devicePixelRatio); r.setSize(w, h, !this._fit_main_area); r.localClippingEnabled = true; diff --git a/js/modules/gpad/RAxisPainter.mjs b/js/modules/gpad/RAxisPainter.mjs index 5a714b478f834..c5290586cec3d 100644 --- a/js/modules/gpad/RAxisPainter.mjs +++ b/js/modules/gpad/RAxisPainter.mjs @@ -1,8 +1,8 @@ -import { settings, isFunc } from '../core.mjs'; import { select as d3_select, pointer as d3_pointer, drag as d3_drag, timeFormat as d3_timeFormat, scaleTime as d3_scaleTime, scaleSymlog as d3_scaleSymlog, scaleLog as d3_scaleLog, scaleLinear as d3_scaleLinear } from '../d3.mjs'; +import { settings, isFunc, urlClassPrefix } from '../core.mjs'; import { makeTranslate, addHighlightStyle } from '../base/BasePainter.mjs'; import { AxisPainterMethods, chooseTimeFormat } from './TAxisPainter.mjs'; import { createMenu } from '../gui/menu.mjs'; @@ -355,6 +355,9 @@ class RAxisPainter extends RObjectPainter { return this.v7EvalAttr('labels_center', false); } + /** @summary Is labels should be rotated */ + isRotateLabels() { return false; } + /** @summary Used to move axis labels instead of zooming * @private */ processLabelsMove(arg, pos) { @@ -981,7 +984,7 @@ class RAxisPainter extends RObjectPainter { evnt.stopPropagation(); // disable main context menu evnt.preventDefault(); // disable browser context menu createMenu(evnt, this).then(menu => { - 
menu.header('RAxisDrawable'); + menu.header('RAxisDrawable', `${urlClassPrefix}ROOT_1_1Experimental_1_1RAxisBase.html`); menu.add('Unzoom', () => this.zoomStandalone()); this.fillAxisContextMenu(menu, ''); menu.show(); diff --git a/js/modules/gpad/RCanvasPainter.mjs b/js/modules/gpad/RCanvasPainter.mjs index aac198b3e9294..fd32f1c29103b 100644 --- a/js/modules/gpad/RCanvasPainter.mjs +++ b/js/modules/gpad/RCanvasPainter.mjs @@ -205,6 +205,10 @@ class RCanvasPainter extends RPadPainter { this.sendWebsocket('PRODUCE:' + fname); } + /** @summary Return true if message can be send via web socket + * @private */ + canSendWebSocket() { return this._websocket?.canSend(); } + /** @summary Send message via web socket * @private */ sendWebsocket(msg) { diff --git a/js/modules/gpad/RFramePainter.mjs b/js/modules/gpad/RFramePainter.mjs index 51ea8bdb3967b..c45fc844c0a55 100644 --- a/js/modules/gpad/RFramePainter.mjs +++ b/js/modules/gpad/RFramePainter.mjs @@ -1,4 +1,4 @@ -import { gStyle, settings, internals, create, isFunc, isStr, clTAxis, nsREX } from '../core.mjs'; +import { gStyle, settings, internals, create, isFunc, isStr, clTAxis, nsREX, urlClassPrefix } from '../core.mjs'; import { pointer as d3_pointer } from '../d3.mjs'; import { getSvgLineStyle } from '../base/TAttLineHandler.mjs'; import { makeTranslate } from '../base/BasePainter.mjs'; @@ -668,9 +668,14 @@ class RFramePainter extends RObjectPainter { this._frame_height = h; this._frame_rotate = rotate; this._frame_fixpos = fixpos; + this._frame_trans = trans; - if (this.mode3d) return this; // no need for real draw in mode3d + return this.mode3d ? this : this.createFrameG(); + } + /** @summary Create frame element and update all attributes + * @private */ + createFrameG() { // this is svg:g object - container for every other items belonging to frame this.draw_g = this.getFrameSvg(); @@ -699,20 +704,20 @@ class RFramePainter extends RObjectPainter { this.axes_drawn = false; - this.draw_g.attr('transform', trans); + this.draw_g.attr('transform', this._frame_trans); top_rect.attr('x', 0) .attr('y', 0) - .attr('width', w) - .attr('height', h) + .attr('width', this._frame_width) + .attr('height', this._frame_height) .attr('rx', this.lineatt.rx || null) .attr('ry', this.lineatt.ry || null) .call(this.fillatt.func) .call(this.lineatt.func); - main_svg.attr('width', w) - .attr('height', h) - .attr('viewBox', `0 0 ${w} ${h}`); + main_svg.attr('width', this._frame_width) + .attr('height', this._frame_height) + .attr('viewBox', `0 0 ${this._frame_width} ${this._frame_height}`); let pr = Promise.resolve(true); @@ -1062,7 +1067,7 @@ class RFramePainter extends RObjectPainter { const handle = this[kind+'_handle'], faxis = obj || this[kind+'axis']; if (!handle) return false; - menu.header(`${kind.toUpperCase()} axis`); + menu.header(`${kind.toUpperCase()} axis`, `${urlClassPrefix}ROOT_1_1Experimental_1_1RAxisBase.html`); if (isFunc(faxis?.TestBit)) { const main = this.getMainPainter(true); @@ -1076,7 +1081,7 @@ class RFramePainter extends RObjectPainter { const alone = menu.size() === 0; if (alone) - menu.header('Frame'); + menu.header('Frame', `${urlClassPrefix}ROOT_1_1Experimental_1_1RFrame.html`); else menu.separator(); diff --git a/js/modules/gpad/RPadPainter.mjs b/js/modules/gpad/RPadPainter.mjs index a0f676914152f..13c2508e0f340 100644 --- a/js/modules/gpad/RPadPainter.mjs +++ b/js/modules/gpad/RPadPainter.mjs @@ -1,11 +1,11 @@ -import { gStyle, settings, browser, constants, internals, addMethods, - isPromise, getPromise, postponePromise, 
isBatchMode, isObject, isFunc, isStr, clTPad, clTFrame, nsREX } from '../core.mjs'; +import { gStyle, settings, browser, constants, internals, addMethods, isPromise, getPromise, postponePromise, + isBatchMode, isObject, isFunc, isStr, clTPad, clTFrame, nsREX, nsSVG, urlClassPrefix } from '../core.mjs'; import { ColorPalette, addColor, getRootColors, convertColor } from '../base/colors.mjs'; import { RObjectPainter } from '../base/RObjectPainter.mjs'; import { prSVG, getElementRect, getAbsPosInCanvas, DrawOptions, compressSVG, makeTranslate, svgToImage } from '../base/BasePainter.mjs'; import { selectActivePad, getActivePad } from '../base/ObjectPainter.mjs'; import { registerForResize, saveFile } from '../gui/utils.mjs'; -import { BrowserLayout } from '../gui/display.mjs'; +import { BrowserLayout, getHPainter } from '../gui/display.mjs'; import { createMenu, closeMenu } from '../gui/menu.mjs'; import { PadButtonsHandler } from './TPadPainter.mjs'; @@ -316,16 +316,16 @@ class RPadPainter extends RObjectPainter { /** @summary Generate pad events, normally handled by GED * @desc in pad painter, while pad may be drawn without canvas * @private */ - producePadEvent(what, padpainter, painter, position, place) { + producePadEvent(what, padpainter, painter, position) { if ((what === 'select') && isFunc(this.selectActivePad)) this.selectActivePad(padpainter, painter, position); - if (this.pad_events_receiver) - this.pad_events_receiver({ what, padpainter, painter, position, place }); + if (isFunc(this.pad_events_receiver)) + this.pad_events_receiver({ what, padpainter, painter, position }); } /** @summary method redirect call to pad events receiver */ - selectObjectPainter(painter, pos, place) { + selectObjectPainter(painter, pos) { const istoppad = (this.iscan || !this.has_canvas), canp = istoppad ? this : this.getCanvPainter(); @@ -336,7 +336,7 @@ class RPadPainter extends RObjectPainter { selectActivePad({ pp: this, active: true }); - canp.producePadEvent('select', this, painter, pos, place); + canp.producePadEvent('select', this, painter, pos); } /** @summary Set fast drawing property depending on the size @@ -436,9 +436,19 @@ class RPadPainter extends RObjectPainter { this.createAttFill({ pattern: 1001, color: 0 }); if ((rect.width <= lmt) || (rect.height <= lmt)) { - svg.style('display', 'none'); - console.warn(`Hide canvas while geometry too small w=${rect.width} h=${rect.height}`); - rect.width = 200; rect.height = 100; // just to complete drawing + if (this.snapid === undefined) { + svg.style('display', 'none'); + console.warn(`Hide canvas while geometry too small w=${rect.width} h=${rect.height}`); + } + if (this._pad_width && this._pad_height) { + // use last valid dimensions + rect.width = this._pad_width; + rect.height = this._pad_height; + } else { + // just to complete drawing. + rect.width = 800; + rect.height = 600; + } } else svg.style('display', null); @@ -749,7 +759,9 @@ class RPadPainter extends RObjectPainter { /** @summary Fill pad context menu * @private */ fillContextMenu(menu) { - menu.header(this.iscan ? 'RCanvas' : 'RPad'); + const clname = this.iscan ? 
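When the enclosing DOM element reports a degenerate size, the pad painter now hides the canvas only for offline drawings (no snapid) and otherwise falls back to the last valid geometry instead of a hard-coded minimum. A small sketch of that decision, assuming `_pad_width`/`_pad_height` cache the previously used dimensions and `lmt` is the minimal accepted size:

   // sketch: choose the dimensions to draw with when the element is too small
   function resolvePadSize(rect, last_width, last_height, lmt = 5) {
      if ((rect.width > lmt) && (rect.height > lmt))
         return rect;                                        // geometry is usable
      if (last_width && last_height)
         return { width: last_width, height: last_height };  // reuse last valid size
      return { width: 800, height: 600 };                    // final fallback to complete drawing
   }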
'RCanvas' : 'RPad'; + + menu.header(clname, `${urlClassPrefix}ROOT_1_1Experimental_1_1${clname}.html`); menu.addchk(this.isTooltipAllowed(), 'Show tooltips', () => this.setTooltipAllowed('toggle')); @@ -1181,7 +1193,7 @@ class RPadPainter extends RObjectPainter { const mainid = this.selectDom().attr('id'); - if (!this.isBatchMode() && !this.use_openui && !this.brlayout && mainid && isStr(mainid)) { + if (!this.isBatchMode() && this.online_canvas && !this.use_openui && !this.brlayout && mainid && isStr(mainid) && !getHPainter()) { this.brlayout = new BrowserLayout(mainid, null, this); this.brlayout.create(mainid, true); this.setDom(this.brlayout.drawing_divid()); // need to create canvas @@ -1433,7 +1445,7 @@ class RPadPainter extends RObjectPainter { const arg = (file_format === 'pdf') ? { node: elem.node(), width, height, reset_tranform: use_frame } - : compressSVG(`${elem.node().innerHTML}`); + : compressSVG(`${elem.node().innerHTML}`); return svgToImage(arg, file_format, args).then(res => { for (let k = 0; k < items.length; ++k) { diff --git a/js/modules/gpad/TAxisPainter.mjs b/js/modules/gpad/TAxisPainter.mjs index 55f647500d6b9..72eaf63dbf3bf 100644 --- a/js/modules/gpad/TAxisPainter.mjs +++ b/js/modules/gpad/TAxisPainter.mjs @@ -409,6 +409,9 @@ class TAxisPainter extends ObjectPainter { /** @summary cleanup painter */ cleanup() { this.cleanupAxisPainter(); + delete this.hist_painter; + delete this.hist_axis; + delete this.is_gaxis; super.cleanup(); } @@ -790,12 +793,22 @@ class TAxisPainter extends ObjectPainter { return this.getObject()?.TestBit(EAxisBits.kCenterLabels); } + /** @summary Is labels should be rotated */ + isRotateLabels() { + return this.getObject()?.TestBit(EAxisBits.kLabelsVert); + } + + /** @summary Is title should be rotated */ + isRotateTitle() { + return this.getObject()?.TestBit(EAxisBits.kRotateTitle); + } + /** @summary Add interactive elements to draw axes title */ addTitleDrag(title_g, vertical, offset_k, reverse, axis_length) { if (!settings.MoveResize || this.isBatchMode()) return; - let drag_rect = null, - acc_x, acc_y, new_x, new_y, sign_0, alt_pos, curr_indx; + let drag_rect = null, x_0, y_0, i_0, + acc_x, acc_y, new_x, new_y, sign_0, alt_pos, curr_indx, can_indx0 = true; const drag_move = d3_drag().subject(Object); drag_move.on('start', evnt => { @@ -805,10 +818,11 @@ class TAxisPainter extends ObjectPainter { const box = title_g.node().getBBox(), // check that elements visible, request precise value title_length = vertical ? box.height : box.width; - new_x = acc_x = title_g.property('shift_x'); - new_y = acc_y = title_g.property('shift_y'); + x_0 = new_x = acc_x = title_g.property('shift_x'); + y_0 = new_y = acc_y = title_g.property('shift_y'); sign_0 = vertical ? (acc_x > 0) : (acc_y > 0); // sign should remain + can_indx0 = !this.hist_painter?.snapid; // online canvas does not allow alternate position alt_pos = vertical ? [axis_length, axis_length/2, 0] : [0, axis_length/2, axis_length]; // possible positions const off = vertical ? -title_length/2 : title_length/2; @@ -825,12 +839,13 @@ class TAxisPainter extends ObjectPainter { if (this.titleCenter) curr_indx = 1; - else if (reverse ^ this.titleOpposite) + else if ((reverse ^ this.titleOpposite) && can_indx0) curr_indx = 0; else curr_indx = 2; alt_pos[curr_indx] = vertical ? 
acc_y : acc_x; + i_0 = curr_indx; drag_rect = title_g.append('rect') .attr('x', box.x) @@ -849,11 +864,13 @@ class TAxisPainter extends ObjectPainter { acc_x += evnt.dx; acc_y += evnt.dy; - let set_x, set_y, besti = 0; + let set_x, set_y, besti = can_indx0 ? 0 : 1; const p = vertical ? acc_y : acc_x; - for (let i = 1; i < 3; ++i) - if (Math.abs(p - alt_pos[i]) < Math.abs(p - alt_pos[besti])) besti = i; + for (let i = 1; i < 3; ++i) { + if (Math.abs(p - alt_pos[i]) < Math.abs(p - alt_pos[besti])) + besti = i; + } if (vertical) { set_x = acc_x; @@ -864,7 +881,9 @@ class TAxisPainter extends ObjectPainter { } if (sign_0 === (vertical ? (set_x > 0) : (set_y > 0))) { - new_x = set_x; new_y = set_y; curr_indx = besti; + new_x = set_x; + new_y = set_y; + curr_indx = besti; makeTranslate(title_g, new_x, new_y); } }).on('end', evnt => { @@ -898,10 +917,14 @@ class TAxisPainter extends ObjectPainter { setBit(EAxisBits.kOppositeTitle, false); this.titleOpposite = false; } - this.submitAxisExec(`SetTitleOffset(${offset});;SetBit(${EAxisBits.kCenterTitle},${this.titleCenter?1:0})`); - drag_rect.remove(); drag_rect = null; + + if ((x_0 !== new_x) || (y_0 !== new_y) || (i_0 !== curr_indx)) + this.submitAxisExec(`SetTitleOffset(${offset});;SetBit(${EAxisBits.kCenterTitle},${this.titleCenter?1:0})`); + + if (this.hist_painter && this.hist_axis) + this.hist_painter.getCanvPainter()?.producePadEvent('select', this.hist_painter.getPadPainter(), this); }); title_g.style('cursor', 'move').call(drag_move); @@ -983,7 +1006,7 @@ class TAxisPainter extends ObjectPainter { label_g = [axis_g.append('svg:g').attr('class', 'axis_labels')], lbl_pos = handle.lbl_pos || handle.major, tilt_angle = gStyle.AxisTiltAngle ?? 25; - let rotate_lbls = axis.TestBit(EAxisBits.kLabelsVert), + let rotate_lbls = this.isRotateLabels(), textscale = 1, flipscale = 1, maxtextlen = 0, applied_scale = 0, lbl_tilt = false, any_modified = false, max_textwidth = 0, max_tiltsize = 0; @@ -1283,9 +1306,6 @@ class TAxisPainter extends ObjectPainter { if (this.is_gaxis) draw_lines = axis.fLineColor !== 0; - // indicate that attributes created not for TAttLine, therefore cannot be updated as TAttLine in GED - this.lineatt.not_standard = true; - if (!this.is_gaxis || (this.name === 'zaxis')) { axis_g = layer.selectChild(`.${this.name}_container`); if (axis_g.empty()) @@ -1373,7 +1393,7 @@ class TAxisPainter extends ObjectPainter { if (!title_g) return; - const rotate = axis.TestBit(EAxisBits.kRotateTitle) ? -1 : 1, + const rotate = this.isRotateTitle() ? 
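The title-drag handler above now remembers its starting state (`x_0`, `y_0`, `i_0`) and talks to the server only when the position or the alternate-position index really changed, then reports the selection to the GED via `producePadEvent`. The change-detection idea in isolation, as a hedged sketch with illustrative names:

   // sketch: capture state at drag start, submit only if it differs at drag end
   function makeDragTracker(getState, submit) {
      let start = null;
      return {
         onStart() { start = getState(); },
         onEnd() {
            const now = getState();
            if (JSON.stringify(now) !== JSON.stringify(start))
               submit(now); // avoid a server round-trip for a no-op drag
         }
      };
   }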
-1 : 1, xor_reverse = swap_side ^ this.titleOpposite, myxor = (rotate < 0) ^ xor_reverse; let title_offest_k = side; diff --git a/js/modules/gpad/TCanvasPainter.mjs b/js/modules/gpad/TCanvasPainter.mjs index 6610b683dd646..6f992c9deb058 100644 --- a/js/modules/gpad/TCanvasPainter.mjs +++ b/js/modules/gpad/TCanvasPainter.mjs @@ -277,6 +277,10 @@ class TCanvasPainter extends TPadPainter { return this.sendWebsocket(`OBJEXEC:${snapid}:${exec}`); } + /** @summary Return true if message can be send via web socket + * @private */ + canSendWebSocket() { return this._websocket?.canSend(); } + /** @summary Send text message with web socket * @desc used for communication with server-side of web canvas * @private */ @@ -594,7 +598,6 @@ class TCanvasPainter extends TPadPainter { objpainter?.getPadPainter()?.selectObjectPainter(objpainter); - console.log('activate GED'); this.processChanges('sbits', this); resolveFunc(true); @@ -644,11 +647,17 @@ class TCanvasPainter extends TPadPainter { if (this._all_sections_showed) return; this._all_sections_showed = true; + + // used in Canvas.controller.js to avoid browser resize because of initial sections show/hide + this._ignore_section_resize = true; + this.showSection('Menu', this.pad.TestBit(kMenuBar)); this.showSection('StatusBar', this.pad.TestBit(kShowEventStatus)); this.showSection('ToolBar', this.pad.TestBit(kShowToolBar)); this.showSection('Editor', this.pad.TestBit(kShowEditor)); this.showSection('ToolTips', this.pad.TestBit(kShowToolTips) || this._highlight_connect); + + this._ignore_section_resize = false; } /** @summary Handle highlight in canvas - deliver information to server diff --git a/js/modules/gpad/TFramePainter.mjs b/js/modules/gpad/TFramePainter.mjs index 02b1ee1fbde99..00c974ce7801d 100644 --- a/js/modules/gpad/TFramePainter.mjs +++ b/js/modules/gpad/TFramePainter.mjs @@ -1,4 +1,5 @@ -import { gStyle, settings, internals, isFunc, isStr, postponePromise, browser, clTAxis, kNoZoom } from '../core.mjs'; +import { gStyle, settings, internals, isFunc, isStr, postponePromise, browser, + clTAxis, clTFrame, kNoZoom, urlClassPrefix } from '../core.mjs'; import { select as d3_select, pointer as d3_pointer, pointers as d3_pointers, drag as d3_drag } from '../d3.mjs'; import { getElementRect, getAbsPosInCanvas, makeTranslate, addHighlightStyle } from '../base/BasePainter.mjs'; import { getActivePad, ObjectPainter, EAxisBits, kAxisLabels } from '../base/ObjectPainter.mjs'; @@ -913,7 +914,8 @@ const TooltipHandler = { if (exact) { const handler = dblckick ? this._dblclick_handler : this._click_handler; - if (handler) res = handler(exact.user_info, pnt); + if (isFunc(handler)) + res = handler(exact.user_info, pnt); } if (!dblckick) { @@ -1178,10 +1180,10 @@ const TooltipHandler = { this.processFrameClick(pnt); break; case 2: - this.getPadPainter()?.selectObjectPainter(this, null, 'xaxis'); + this.getPadPainter()?.selectObjectPainter(this.x_handle); break; case 3: - this.getPadPainter()?.selectObjectPainter(this, null, 'yaxis'); + this.getPadPainter()?.selectObjectPainter(this.y_handle); break; } @@ -2490,9 +2492,14 @@ class TFramePainter extends ObjectPainter { this._frame_height = h; this._frame_rotate = rotate; this._frame_fixpos = fixpos; + this._frame_trans = trans; - if (this.mode3d) return this; // no need to create any elements in 3d mode + return this.mode3d ? 
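Showing all canvas sections at startup is now bracketed by `_ignore_section_resize`, so the openui5 controller (Canvas.controller.js, per the comment) can skip the burst of intermediate resizes. A generic sketch of this guard-flag pattern, not tied to the real controller API:

   // sketch: suppress per-change reactions while a batch of toggles is applied
   const panel = {
      _ignore_resize: false,
      onResize() {
         if (this._ignore_resize) return;   // batched update in progress
         console.log('relayout');
      },
      showSection(name, on) {
         console.log(`section ${name}: ${on}`);
         this.onResize();                   // each toggle normally triggers a resize
      },
      showAllSections() {
         this._ignore_resize = true;
         ['Menu', 'StatusBar', 'ToolBar', 'Editor'].forEach(n => this.showSection(n, true));
         this._ignore_resize = false;
         this.onResize();                   // single relayout at the end
      }
   };
   panel.showAllSections();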
this : this.createFrameG(); + } + /** @summary Create frame element and update all attributes + * @private */ + createFrameG() { // this is svg:g object - container for every other items belonging to frame this.draw_g = this.getFrameSvg(); @@ -2522,15 +2529,15 @@ class TFramePainter extends ObjectPainter { this.axes_drawn = this.axes2_drawn = false; - this.draw_g.attr('transform', trans); + this.draw_g.attr('transform', this._frame_trans); - top_rect.attr('d', `M0,0H${w}V${h}H0Z`) + top_rect.attr('d', `M0,0H${this._frame_width}V${this._frame_height}H0Z`) .call(this.fillatt.func) .call(this.lineatt.func); - main_svg.attr('width', w) - .attr('height', h) - .attr('viewBox', `0 0 ${w} ${h}`); + main_svg.attr('width', this._frame_width) + .attr('height', this._frame_height) + .attr('viewBox', `0 0 ${this._frame_width} ${this._frame_height}`); return this; } @@ -2586,7 +2593,9 @@ class TFramePainter extends ObjectPainter { handle = this[`${kind}_handle`]; if (!isFunc(faxis?.TestBit)) return false; - menu.header(`${kind.toUpperCase()} axis`); + const hist_painter = handle?.hist_painter || main; + + menu.header(`${kind.toUpperCase()} axis`, `${urlClassPrefix}${clTAxis}.html`); menu.sub('Range'); menu.add('Zoom', () => { @@ -2636,8 +2645,8 @@ class TFramePainter extends ObjectPainter { } menu.addchk(faxis.TestBit(EAxisBits.kMoreLogLabels), 'More log', flag => { faxis.InvertBit(EAxisBits.kMoreLogLabels); - if (main?.snapid && (kind.length === 1)) - main.interactiveRedraw('pad', `exec:SetMoreLogLabels(${flag})`, kind); + if (hist_painter?.snapid && (kind.length === 1)) + hist_painter.interactiveRedraw('pad', `exec:SetMoreLogLabels(${flag})`, kind); else this.interactiveRedraw('pad'); }); @@ -2646,23 +2655,23 @@ class TFramePainter extends ObjectPainter { faxis.InvertBit(EAxisBits.kNoExponent); if (handle) handle.noexp_changed = true; this[`${kind}_noexp_changed`] = true; - if (main?.snapid && (kind.length === 1)) - main.interactiveRedraw('pad', `exec:SetNoExponent(${flag})`, kind); + if (hist_painter?.snapid && (kind.length === 1)) + hist_painter.interactiveRedraw('pad', `exec:SetNoExponent(${flag})`, kind); else this.interactiveRedraw('pad'); }); - if ((kind === 'z') && isFunc(main?.fillPaletteMenu)) - main.fillPaletteMenu(menu, !is_pal); + if ((kind === 'z') && isFunc(hist_painter?.fillPaletteMenu)) + hist_painter.fillPaletteMenu(menu, !is_pal); - menu.addTAxisMenu(EAxisBits, main || this, faxis, kind, handle, this); + menu.addTAxisMenu(EAxisBits, hist_painter || this, faxis, kind, handle, this); return true; } const alone = menu.size() === 0; if (alone) - menu.header('Frame'); + menu.header('Frame', `${urlClassPrefix}${clTFrame}.html`); else menu.separator(); diff --git a/js/modules/gpad/TPadPainter.mjs b/js/modules/gpad/TPadPainter.mjs index f12fa338d5866..93aef74d9a517 100644 --- a/js/modules/gpad/TPadPainter.mjs +++ b/js/modules/gpad/TPadPainter.mjs @@ -1,7 +1,7 @@ import { gStyle, settings, constants, browser, internals, BIT, create, toJSON, isBatchMode, loadModules, loadScript, injectCode, isPromise, getPromise, postponePromise, - isObject, isFunc, isStr, clTObjArray, clTPaveText, clTColor, clTPad, clTFrame, clTStyle, clTLegend, - clTHStack, clTMultiGraph, clTLegendEntry, nsSVG, kTitle, clTList } from '../core.mjs'; + isObject, isFunc, isStr, clTObjArray, clTPaveText, clTColor, clTPad, clTCanvas, clTFrame, clTStyle, clTLegend, + clTHStack, clTMultiGraph, clTLegendEntry, nsSVG, kTitle, clTList, urlClassPrefix } from '../core.mjs'; import { select as d3_select, rgb as d3_rgb } from '../d3.mjs'; 
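The axis context menu above now routes exec-style commands through `hist_painter` (the histogram painter attached to the axis handle, falling back to the frame's main painter), so that in a connected canvas the command reaches the object that actually owns a snapid. Reduced to its core, the dispatch looks roughly like this sketch:

   // sketch: send the change to the server-connected histogram painter when
   // possible, otherwise just redraw locally
   function applyAxisChange(hist_painter, frame_painter, execCmd) {
      if (hist_painter?.snapid)
         hist_painter.interactiveRedraw('pad', `exec:${execCmd}`);
      else
         frame_painter.interactiveRedraw('pad');
   }

   // e.g. applyAxisChange(hp, fp, 'SetMoreLogLabels(1)');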
import { ColorPalette, adoptRootColors, getColorPalette, getGrayColors, extendRootColors, getRGBfromTColor, decodeWebCanvasColors } from '../base/colors.mjs'; @@ -512,27 +512,28 @@ class TPadPainter extends ObjectPainter { /** @summary Generate pad events, normally handled by GED * @desc in pad painter, while pad may be drawn without canvas * @private */ - producePadEvent(what, padpainter, painter, position, place) { + producePadEvent(what, padpainter, painter, position) { if ((what === 'select') && isFunc(this.selectActivePad)) this.selectActivePad(padpainter, painter, position); if (isFunc(this.pad_events_receiver)) - this.pad_events_receiver({ what, padpainter, painter, position, place }); + this.pad_events_receiver({ what, padpainter, painter, position }); } /** @summary method redirect call to pad events receiver */ - selectObjectPainter(painter, pos, place) { + selectObjectPainter(painter, pos) { const istoppad = this.iscan || !this.has_canvas, - canp = istoppad ? this : this.getCanvPainter(); + canp = istoppad ? this : this.getCanvPainter(); - if (painter === undefined) painter = this; + if (painter === undefined) + painter = this; if (pos && !istoppad) pos = getAbsPosInCanvas(this.svg_this_pad(), pos); selectActivePad({ pp: this, active: true }); - canp?.producePadEvent('select', this, painter, pos, place); + canp?.producePadEvent('select', this, painter, pos); } /** @summary Draw pad active border @@ -681,8 +682,10 @@ class TPadPainter extends ObjectPainter { this.createAttFill({ attr: this.pad }); if ((rect.width <= lmt) || (rect.height <= lmt)) { - svg.style('display', 'none'); - console.warn(`Hide canvas while geometry too small w=${rect.width} h=${rect.height}`); + if (this.snapid === undefined) { + svg.style('display', 'none'); + console.warn(`Hide canvas while geometry too small w=${rect.width} h=${rect.height}`); + } if (this._pad_width && this._pad_height) { // use last valid dimensions rect.width = this._pad_width; @@ -1261,9 +1264,9 @@ class TPadPainter extends ObjectPainter { * @private */ fillContextMenu(menu) { if (this.pad) - menu.header(`${this.pad._typename}::${this.pad.fName}`); + menu.header(`${this.pad._typename}::${this.pad.fName}`, `${urlClassPrefix}${this.pad._typename}.html`); else - menu.header('Canvas'); + menu.header('Canvas', `${urlClassPrefix}${clTCanvas}.html`); menu.addchk(this.isTooltipAllowed(), 'Show tooltips', () => this.setTooltipAllowed('toggle')); @@ -1878,7 +1881,7 @@ class TPadPainter extends ObjectPainter { const mainid = this.selectDom().attr('id'); - if (!this.isBatchMode() && !this.use_openui && !this.brlayout && mainid && isStr(mainid)) { + if (!this.isBatchMode() && this.online_canvas && !this.use_openui && !this.brlayout && mainid && isStr(mainid) && !getHPainter()) { this.brlayout = new BrowserLayout(mainid, null, this); this.brlayout.create(mainid, true); this.setDom(this.brlayout.drawing_divid()); // need to create canvas @@ -2347,7 +2350,7 @@ class TPadPainter extends ObjectPainter { const arg = (file_format === 'pdf') ? 
{ node: elem.node(), width, height, reset_tranform: use_frame } - : compressSVG(`${elem.node().innerHTML}`); + : compressSVG(`${elem.node().innerHTML}`); return svgToImage(arg, file_format, args).then(res => { // reactivate border diff --git a/js/modules/gui/menu.mjs b/js/modules/gui/menu.mjs index a7049b617b502..063430d5cd4f3 100644 --- a/js/modules/gui/menu.mjs +++ b/js/modules/gui/menu.mjs @@ -103,8 +103,8 @@ class JSRootMenu { } /** @summary Add menu header - must be first entry */ - header(name) { - this.add(sHeader + name); + header(name, title) { + this.add(sHeader + name, undefined, undefined, title); } /** @summary Add draw sub-menu with draw options @@ -744,8 +744,10 @@ class JSRootMenu { }); this.addchk(faxis.TestBit(EAxisBits.kCenterTitle), 'Center', arg => { faxis.InvertBit(EAxisBits.kCenterTitle); painter.interactiveRedraw('pad', `exec:CenterTitle(${arg})`, kind); }); - this.addchk(faxis.TestBit(EAxisBits.kOppositeTitle), 'Opposite', - () => { faxis.InvertBit(EAxisBits.kOppositeTitle); painter.redrawPad(); }); + if (!painter?.snapid) { + this.addchk(faxis.TestBit(EAxisBits.kOppositeTitle), 'Opposite', + () => { faxis.InvertBit(EAxisBits.kOppositeTitle); painter.redrawPad(); }); + } this.addchk(faxis.TestBit(EAxisBits.kRotateTitle), 'Rotate', arg => { faxis.InvertBit(EAxisBits.kRotateTitle); painter.interactiveRedraw('pad', is_gaxis ? `exec:SetBit(TAxis::kRotateTitle, ${arg})` : `exec:RotateTitle(${arg})`, kind); }); if (is_gaxis) { @@ -1173,7 +1175,7 @@ class StandaloneMenu extends JSRootMenu { return curr.push({ divider: true }); if (name.indexOf(sHeader) === 0) - return curr.push({ text: name.slice(sHeader.length), header: true }); + return curr.push({ text: name.slice(sHeader.length), header: true, title }); if (name === sEndsub) { this.stack.pop(); @@ -1281,16 +1283,50 @@ class StandaloneMenu extends JSRootMenu { if (d.header) { item.style = 'background-color: lightblue; padding: 3px 7px; font-weight: bold; border-bottom: 1px;'; - item.innerHTML = d.text; + + let url = '', title = ''; + if (d.title) { + const p = d.title.indexOf('https://'); + if (p >= 0) { + url = d.title.slice(p); + title = d.title.slice(0, p); + } else + title = d.title; + } + if (!url) + item.innerHTML = d.text; + else { + item.style.display = 'flex'; + item.style['justify-content'] = 'space-between'; + + const txt = doc.createElement('span'); + txt.innerHTML = d.text; + txt.style = 'display: inline-block; margin: 0;'; + item.appendChild(txt); + + const anchor = doc.createElement('span'); + anchor.style = 'margin: 0; color: blue; opacity: 0.1; margin-left: 7px; right: 3px; display: inline-block; cursor: pointer;'; + anchor.textContent = '?'; + anchor.title = url; + anchor.addEventListener('click', () => { + const cp = this.painter?.getCanvPainter(); + if (cp?.canSendWebSocket()) + cp.sendWebsocket(`SHOWURL:${url}`); + else + window.open(url); + }); + anchor.addEventListener('mouseenter', () => { anchor.style.opacity = 1; }); + anchor.addEventListener('mouseleave', () => { anchor.style.opacity = 0.1; }); + item.appendChild(anchor); + } + if (title) + item.setAttribute('title', title); + return; } const hovArea = doc.createElement('div'); - hovArea.style.width = '100%'; - hovArea.style.height = '100%'; - hovArea.style.display = 'flex'; - hovArea.style.justifyContent = 'space-between'; - hovArea.style.cursor = 'pointer'; + hovArea.style = 'width: 100%; height: 100%; display: flex; justify-content: space-between; cursor: pointer;'; if (d.title) hovArea.setAttribute('title', d.title); 
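The standalone menu splits a header title at the first 'https://' occurrence: the leading part stays as the tooltip and the URL feeds the small '?' anchor, which either asks the connected canvas to open the page (SHOWURL message) or falls back to window.open. A trimmed sketch of the two pieces; `canvas_painter` stands in for whatever getCanvPainter() returns:

   // split 'tooltip text https://...' into plain tooltip and documentation URL
   function splitHeaderTitle(full) {
      const p = full?.indexOf('https://') ?? -1;
      return (p < 0) ? { title: full || '', url: '' }
                     : { title: full.slice(0, p), url: full.slice(p) };
   }

   // decide how to open the documentation page
   function openDoc(url, canvas_painter) {
      if (canvas_painter?.canSendWebSocket())
         canvas_painter.sendWebsocket(`SHOWURL:${url}`); // let the native application show it
      else
         window.open(url);
   }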
item.appendChild(hovArea); @@ -1347,7 +1383,7 @@ class StandaloneMenu extends JSRootMenu { if (d.extraText || d.sub) { const extraText = doc.createElement('span'); extraText.className = 'jsroot_ctxt_extraText'; - extraText.style = 'margin: 0; padding: 3px 7px; color: rgb(0, 0, 0, 0.6);'; + extraText.style = 'margin: 0; padding: 3px 7px; color: rgba(0, 0, 0, 0.6);'; extraText.textContent = d.sub ? '\u25B6' : d.extraText; hovArea.appendChild(extraText); diff --git a/js/modules/hist/TH1Painter.mjs b/js/modules/hist/TH1Painter.mjs index 321b689c792fc..4f704aa13caaf 100644 --- a/js/modules/hist/TH1Painter.mjs +++ b/js/modules/hist/TH1Painter.mjs @@ -33,8 +33,10 @@ class TH1Painter extends TH1Painter2D { pr = main.create3DScene(this.options.Render3D, this.options.x3dscale, this.options.y3dscale, this.options.Ortho).then(() => { main.setAxesRanges(histo.fXaxis, this.xmin, this.xmax, histo.fYaxis, this.ymin, this.ymax, histo.fZaxis, 0, 0, this); main.set3DOptions(this.options); - main.drawXYZ(main.toplevel, TAxisPainter, { use_y_for_z: true, zmult, zoom: settings.Zooming, ndim: 1, - draw: (this.options.Axis !== -1), drawany: this.options.isCartesian() }); + main.drawXYZ(main.toplevel, TAxisPainter, { + ndim: 1, hist_painter: this, use_y_for_z: true, zmult, zoom: settings.Zooming, + draw: (this.options.Axis !== -1), drawany: this.options.isCartesian() + }); }); } diff --git a/js/modules/hist/TH2Painter.mjs b/js/modules/hist/TH2Painter.mjs index 09ccf8a02ddf9..3b4f0bf615e2b 100644 --- a/js/modules/hist/TH2Painter.mjs +++ b/js/modules/hist/TH2Painter.mjs @@ -165,7 +165,7 @@ function drawTH2PolyLego(painter) { geometry.setAttribute('position', new THREE.BufferAttribute(pos, 3)); geometry.computeVertexNormals(); - const material = new THREE.MeshBasicMaterial(getMaterialArgs(painter._color_palette?.getColor(colindx), { vertexColors: false })), + const material = new THREE.MeshBasicMaterial(getMaterialArgs(painter._color_palette?.getColor(colindx), { vertexColors: false, side: THREE.DoubleSide })), mesh = new THREE.Mesh(geometry, material); pmain.add3DMesh(mesh); @@ -241,9 +241,11 @@ class TH2Painter extends TH2Painter2D { pr = main.create3DScene(this.options.Render3D, this.options.x3dscale, this.options.y3dscale, this.options.Ortho).then(() => { main.setAxesRanges(histo.fXaxis, this.xmin, this.xmax, histo.fYaxis, this.ymin, this.ymax, histo.fZaxis, this.zmin, this.zmax, this); main.set3DOptions(this.options); - main.drawXYZ(main.toplevel, TAxisPainter, { zmult, zoom: settings.Zooming, ndim: 2, + main.drawXYZ(main.toplevel, TAxisPainter, { + ndim: 2, hist_painter: this, zmult, zoom: settings.Zooming, draw: this.options.Axis !== -1, drawany: this.options.isCartesian(), - reverse_x: this.options.RevX, reverse_y: this.options.RevY }); + reverse_x: this.options.RevX, reverse_y: this.options.RevY + }); }); } diff --git a/js/modules/hist/TH3Painter.mjs b/js/modules/hist/TH3Painter.mjs index 506314b216fda..643c759d9dad8 100644 --- a/js/modules/hist/TH3Painter.mjs +++ b/js/modules/hist/TH3Painter.mjs @@ -369,7 +369,7 @@ class TH3Painter extends THistPainter { if (!this.draw_content) return false; - let box_option = this.options.Box ? 
this.options.BoxStyle : 0; + let box_option = this.options.BoxStyle; if (!box_option && this.options.Scat) { const promise = this.draw3DScatter(); @@ -394,7 +394,8 @@ class TH3Painter extends THistPainter { if ((this.options.GLBox === 11) || (this.options.GLBox === 12)) { tipscale = 0.4; use_lambert = true; - if (this.options.GLBox === 12) use_colors = true; + if (this.options.GLBox === 12) + use_colors = true; single_bin_geom = new THREE.SphereGeometry(0.5, main.webgl ? 16 : 8, main.webgl ? 12 : 6); single_bin_geom.applyMatrix4(new THREE.Matrix4().makeRotationX(Math.PI/2)); @@ -422,7 +423,7 @@ class TH3Painter extends THistPainter { if (box_option === 12) use_colors = true; - else if (box_option === 13) { + else if (box_option === 13) { use_colors = true; use_helper = false; } else if (this.options.GLColor) { @@ -626,8 +627,10 @@ class TH3Painter extends THistPainter { pr = main.create3DScene(this.options.Render3D, this.options.x3dscale, this.options.y3dscale, this.options.Ortho).then(() => { main.setAxesRanges(histo.fXaxis, this.xmin, this.xmax, histo.fYaxis, this.ymin, this.ymax, histo.fZaxis, this.zmin, this.zmax, this); main.set3DOptions(this.options); - main.drawXYZ(main.toplevel, TAxisPainter, { zoom: settings.Zooming, ndim: 3, - draw: this.options.Axis !== -1, drawany: this.options.isCartesian() }); + main.drawXYZ(main.toplevel, TAxisPainter, { + ndim: 3, hist_painter: this, zoom: settings.Zooming, + draw: this.options.Axis !== -1, drawany: this.options.isCartesian() + }); return this.draw3DBins(); }).then(() => { main.render3D(); @@ -637,7 +640,7 @@ class TH3Painter extends THistPainter { } if (this.isMainPainter()) - pr = pr.then(() => this.drawColorPalette(this.options.Zscale && (this._box_option === 12 || this._box_option === 13))); + pr = pr.then(() => this.drawColorPalette(this.options.Zscale && (this._box_option === 12 || this._box_option === 13 || this.options.GLBox === 12))); return pr.then(() => this.updateFunctions()) .then(() => this.updateHistTitle()) @@ -652,6 +655,7 @@ class TH3Painter extends THistPainter { pp.addPadButton('auto_zoom', 'Unzoom all axes', 'ToggleZoom', 'Ctrl *'); if (this.draw_content) pp.addPadButton('statbox', 'Toggle stat box', 'ToggleStatBox'); + pp.addPadButton('th2colorz', 'Toggle color palette', 'ToggleColorZ'); pp.showPadButtons(); } diff --git a/js/modules/hist/TPavePainter.mjs b/js/modules/hist/TPavePainter.mjs index a591bf658d4a8..4f9aa9fd5c60f 100644 --- a/js/modules/hist/TPavePainter.mjs +++ b/js/modules/hist/TPavePainter.mjs @@ -1,6 +1,6 @@ import { gStyle, browser, settings, clone, isObject, isFunc, isStr, BIT, clTPave, clTPaveText, clTPavesText, clTPaveStats, clTPaveLabel, clTPaveClass, clTDiamond, clTLegend, clTPaletteAxis, - clTText, clTLatex, clTLine, clTBox, kTitle, isNodeJs } from '../core.mjs'; + clTText, clTLatex, clTLine, clTBox, kTitle, isNodeJs, nsSVG } from '../core.mjs'; import { select as d3_select, rgb as d3_rgb, pointer as d3_pointer } from '../d3.mjs'; import { Prob } from '../base/math.mjs'; import { floatToString, makeTranslate, compressSVG, svgToImage, addHighlightStyle } from '../base/BasePainter.mjs'; @@ -49,7 +49,7 @@ class TPavePainter extends ObjectPainter { svg_code = compressSVG(svg_code); - svg_code = '= 0), postpone_draw = isStr(arg) && (arg.indexOf('postpone') >= 0), cjust = isStr(arg) && (arg.indexOf('cjust') >= 0), + bring_stats_front = isStr(arg) && (arg.indexOf('bring_stats_front') >= 0), pp = this.getPadPainter(), width = pp.getPadWidth(), height = pp.getPadHeight(), @@ -960,6 +961,9 @@ class 
TPavePainter extends ObjectPainter { } } + if (bring_stats_front) + this.getPadPainter()?.findPainterFor(null, '', clTPaveStats)?.bringToFront(); + return this.z_handle.drawAxis(this.draw_g, s_width, s_height, axis_transform, axis_second).then(() => { let rect; if (can_move) { @@ -1398,18 +1402,20 @@ class TPavePainter extends ObjectPainter { } else if ((opt === 'postitle') || painter.isDummyPos(pave)) { const st = gStyle, fp = painter.getFramePainter(); if (st && fp) { - const midx = st.fTitleX, y2 = st.fTitleY, fsz = st.fTitleFontSize; - let w = st.fTitleW, h = st.fTitleH; - - if (!h) h = Math.max((y2 - fp.fY2NDC) * 0.7, (fsz < 1) ? 1.1 * fsz : 1.1 * fsz / fp.getFrameWidth()); - if (!w) w = fp.fX2NDC - fp.fX1NDC; + const midx = st.fTitleX, y2 = st.fTitleY, + valign = st.fTitleAlign % 10, halign = (st.fTitleAlign - valign) / 10, + title = pave.fLines?.arr[0]?.fTitle; + let w = st.fTitleW, h = st.fTitleH, fsz = st.fTitleFontSize; + if (fsz > 1) fsz = fsz / fp.getFrameWidth(); + if (!h) h = Math.max((y2 - fp.fY2NDC) * 0.7, 1.1 * fsz); + if (!w) w = (halign !== 2 && title) ? title.length * fsz * 0.2 : fp.fX2NDC - fp.fX1NDC; if (!Number.isFinite(h) || (h <= 0)) h = 0.06; if (!Number.isFinite(w) || (w <= 0)) w = 0.44; - pave.fX1NDC = midx - w/2; - pave.fY1NDC = y2 - h; - pave.fX2NDC = midx + w/2; - pave.fY2NDC = y2; + pave.fX1NDC = halign < 2 ? midx : (halign > 2 ? midx - w : midx - w/2); + pave.fY1NDC = valign === 3 ? y2 - h : (valign === 2 ? y2 - h / 2 : y2); + pave.fX2NDC = pave.fX1NDC + w; + pave.fY2NDC = pave.fY1NDC + h; pave.fInit = 1; } } diff --git a/js/modules/hist/hist3d.mjs b/js/modules/hist/hist3d.mjs index 012c524bf046d..330e6d7ee3647 100644 --- a/js/modules/hist/hist3d.mjs +++ b/js/modules/hist/hist3d.mjs @@ -507,6 +507,9 @@ function create3DScene(render3d, x3dscale, y3dscale, orthographic) { this.mode3d = false; + if (this.draw_g) + this.createFrameG(); + return; } @@ -875,7 +878,9 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (opts.v7) { this.x_handle.pad_name = this.pad_name; this.x_handle.snapid = this.snapid; - } + } else if (opts.hist_painter) + this.x_handle.setHistPainter(opts.hist_painter, 'x'); + this.x_handle.configureAxis('xaxis', this.xmin, this.xmax, xmin, xmax, false, [grminx, grmaxx], { log: pad?.fLogx ?? 0, reverse: opts.reverse_x, logcheckmin: true }); this.x_handle.assignFrameMembers(this, 'x'); @@ -885,7 +890,8 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (opts.v7) { this.y_handle.pad_name = this.pad_name; this.y_handle.snapid = this.snapid; - } + } else if (opts.hist_painter) + this.y_handle.setHistPainter(opts.hist_painter, 'y'); this.y_handle.configureAxis('yaxis', this.ymin, this.ymax, ymin, ymax, false, [grminy, grmaxy], { log: pad && !opts.use_y_for_z ? pad.fLogy : 0, reverse: opts.reverse_y, logcheckmin: opts.ndim > 1 }); this.y_handle.assignFrameMembers(this, 'y'); @@ -895,7 +901,8 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (opts.v7) { this.z_handle.pad_name = this.pad_name; this.z_handle.snapid = this.snapid; - } + } else if (opts.hist_painter) + this.z_handle.setHistPainter(opts.hist_painter, 'z'); this.z_handle.configureAxis('zaxis', this.zmin, this.zmax, zmin, zmax, false, [grminz, grmaxz], { value_axis: (opts.ndim === 1) || (opts.ndim === 2), log: ((opts.use_y_for_z || (opts.ndim === 2)) ? pad?.fLogv : undefined) ?? pad?.fLogz ?? 
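The default title pave is now anchored according to gStyle.fTitleAlign, decoded the usual TAttText way (align = 10*halign + valign, with 1 = left/bottom, 2 = centre, 3 = right/top). A compact sketch of the decomposition and of how a box of size (w, h) in NDC is placed around the anchor point:

   // sketch: place an NDC box so that (x, y) is the requested alignment point
   function placeTitleBox(align, x, y, w, h) {
      const valign = align % 10,
            halign = (align - valign) / 10;
      const x1 = halign < 2 ? x : (halign > 2 ? x - w : x - w / 2),
            y1 = valign === 3 ? y - h : (valign === 2 ? y - h / 2 : y);
      return { fX1NDC: x1, fY1NDC: y1, fX2NDC: x1 + w, fY2NDC: y1 + h };
   }

   // align 23 = horizontally centred, hanging below y
   console.log(placeTitleBox(23, 0.5, 0.95, 0.4, 0.06));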
0, @@ -932,7 +939,9 @@ function drawXYZ(toplevel, AxisPainter, opts) { top.axis_draw = true; // mark element as axis drawing toplevel.add(top); - let ticks = [], lbls = [], maxtextheight = 0; + let ticks = [], lbls = [], maxtextheight = 0, maxtextwidth = 0; + const center_x = this.x_handle.isCenteredLabels(), + rotate_x = this.x_handle.isRotateLabels(); while (xticks.next()) { const grx = xticks.grpos; @@ -945,7 +954,7 @@ function drawXYZ(toplevel, AxisPainter, opts) { is_major = false; lbl = ''; } - if (is_major && lbl && opts.draw) { + if (is_major && lbl && opts.draw && (!center_x || !xticks.last_major())) { const mod = xticks.get_modifier(); if (mod?.fLabText) lbl = mod.fLabText; @@ -957,6 +966,7 @@ function drawXYZ(toplevel, AxisPainter, opts) { text3d.offsety = this.x_handle.labelsOffset + (grmaxy - grminy) * 0.005; + maxtextwidth = Math.max(maxtextwidth, draw_width); maxtextheight = Math.max(maxtextheight, draw_height); if (mod?.fTextColor) text3d.color = this.getColor(mod.fTextColor); @@ -969,8 +979,10 @@ function drawXYZ(toplevel, AxisPainter, opts) { if ((draw_width > 0) && (space > 0)) text_scale = Math.min(text_scale, 0.9*space/draw_width); } + if (rotate_x) + text3d.rotate = 1; - if (this.x_handle.isCenteredLabels()) { + if (center_x) { if (!space) space = Math.min(grx - grminx, grmaxx - grx); text3d.grx += space/2; } @@ -987,6 +999,9 @@ function drawXYZ(toplevel, AxisPainter, opts) { text3d.offsety = 1.6 * this.x_handle.titleOffset + (grmaxy - grminy) * 0.005; text3d.grx = (grminx + grmaxx)/2; // default position for centered title text3d.kind = 'title'; + if (this.x_handle.isRotateTitle()) + text3d.rotate = 2; + lbls.push(text3d); } @@ -1105,6 +1120,7 @@ function drawXYZ(toplevel, AxisPainter, opts) { xcont.position.set(0, grminy, grminz); xcont.rotation.x = 1/4*Math.PI; xcont.xyid = 2; + xcont.painter = this.x_handle; if (opts.draw) { xtickslines = createLineSegments(ticks, getLineMaterial(this.x_handle, 'ticks')); @@ -1112,17 +1128,28 @@ function drawXYZ(toplevel, AxisPainter, opts) { } lbls.forEach(lbl => { - const w = lbl.boundingBox.max.x - lbl.boundingBox.min.x, - posx = lbl.center ? lbl.grx - w/2 : (lbl.opposite ? grminx : grmaxx - w), - m = new THREE.Matrix4(); + const dx = lbl.boundingBox.max.x - lbl.boundingBox.min.x, + dy = lbl.boundingBox.max.y - lbl.boundingBox.min.y, + w = (lbl.rotate === 1) ? dy : dx, + posx = lbl.center ? lbl.grx - w/2 : (lbl.opposite ? grminx : grmaxx - w), + posy = -text_scale * (lbl.rotate === 1 ? maxtextwidth : maxtextheight) - this.x_handle.ticksSize - lbl.offsety, + m = new THREE.Matrix4(); // matrix to swap y and z scales and shift along z to its position m.set(text_scale, 0, 0, posx, - 0, text_scale, 0, -maxtextheight*text_scale - this.x_handle.ticksSize - lbl.offsety, + 0, text_scale, 0, posy, 0, 0, 1, 0, 0, 0, 0, 1); const mesh = new THREE.Mesh(lbl, getTextMaterial(this.x_handle, lbl.kind, lbl.color)); + + if (lbl.rotate) + mesh.rotateZ(lbl.rotate * Math.PI / 2); + if (lbl.rotate === 1) + mesh.translateY(-dy); + if (lbl.rotate === 2) + mesh.translateX(-dx); + mesh.applyMatrix4(m); xcont.add(mesh); }); @@ -1134,21 +1161,32 @@ function drawXYZ(toplevel, AxisPainter, opts) { xcont = new THREE.Object3D(); xcont.position.set(0, grmaxy, grminz); xcont.rotation.x = 3/4*Math.PI; + xcont.painter = this.x_handle; if (opts.draw) xcont.add(new THREE.LineSegments(xtickslines.geometry, xtickslines.material)); lbls.forEach(lbl => { - const w = lbl.boundingBox.max.x - lbl.boundingBox.min.x, - posx = (lbl.center ? lbl.grx + w/2 : lbl.opposite ? 
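In drawXYZ the new `rotate` flag on the label geometries encodes two cases: 1 for tick labels drawn vertically (kLabelsVert) and 2 for a title rotated by 180 degrees (kRotateTitle); the mesh is then rotated about Z and shifted by its own height or width so it stays anchored at the tick position. The recurring rotate/translate step, extracted as a sketch (dx/dy are the label bounding-box sizes):

   // sketch: apply the rotate flag used for 3D axis labels before applyMatrix4
   function applyLabelRotation(mesh, rotate, dx, dy) {
      if (!rotate) return;
      mesh.rotateZ(rotate * Math.PI / 2);       // 1 -> 90 deg, 2 -> 180 deg
      if (rotate === 1) mesh.translateY(-dy);   // vertical label: pull down by its height
      if (rotate === 2) mesh.translateX(-dx);   // rotated title: pull back by its width
   }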
grminx + w : grmaxx), + const dx = lbl.boundingBox.max.x - lbl.boundingBox.min.x, + dy = lbl.boundingBox.max.y - lbl.boundingBox.min.y, + w = (lbl.rotate === 1) ? dy : dx, + posx = lbl.center ? lbl.grx + w/2 : (lbl.opposite ? grminx + w: grmaxx), + posy = -text_scale * (lbl.rotate === 1 ? maxtextwidth : maxtextheight) - this.x_handle.ticksSize - lbl.offsety, m = new THREE.Matrix4(); // matrix to swap y and z scales and shift along z to its position m.set(-text_scale, 0, 0, posx, - 0, text_scale, 0, -maxtextheight*text_scale - this.x_handle.ticksSize - lbl.offsety, + 0, text_scale, 0, posy, 0, 0, -1, 0, 0, 0, 0, 1); + const mesh = new THREE.Mesh(lbl, getTextMaterial(this.x_handle, lbl.kind, lbl.color)); + if (lbl.rotate) + mesh.rotateZ(lbl.rotate * Math.PI / 2); + if (lbl.rotate === 1) + mesh.translateY(-dy); + if (lbl.rotate === 2) + mesh.translateX(-dx); mesh.applyMatrix4(m); xcont.add(mesh); }); @@ -1158,7 +1196,13 @@ function drawXYZ(toplevel, AxisPainter, opts) { xcont.add(createZoomMesh('x', this.size_x3d)); top.add(xcont); - lbls = []; text_scale = 1; maxtextheight = 0; ticks = []; + lbls = []; + text_scale = 1; + maxtextwidth = maxtextheight = 0; + ticks = []; + + const center_y = this.y_handle.isCenteredLabels(), + rotate_y = this.y_handle.isRotateLabels(); while (yticks.next()) { const gry = yticks.grpos; @@ -1171,16 +1215,17 @@ function drawXYZ(toplevel, AxisPainter, opts) { is_major = false; lbl = ''; } - if (is_major && lbl && opts.draw) { + if (is_major && lbl && opts.draw && (!center_y || !yticks.last_major())) { const mod = yticks.get_modifier(); if (mod?.fLabText) lbl = mod.fLabText; const text3d = createLatexGeometry(this, lbl, this.y_handle.labelsFont.size); text3d.computeBoundingBox(); const draw_width = text3d.boundingBox.max.x - text3d.boundingBox.min.x, - draw_height = text3d.boundingBox.max.y - text3d.boundingBox.min.y; + draw_height = text3d.boundingBox.max.y - text3d.boundingBox.min.y; text3d.center = true; + maxtextwidth = Math.max(maxtextwidth, draw_width); maxtextheight = Math.max(maxtextheight, draw_height); if (mod?.fTextColor) text3d.color = this.getColor(mod.fTextColor); @@ -1194,10 +1239,12 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (draw_width > 0) text_scale = Math.min(text_scale, 0.9*space/draw_width); } - if (this.y_handle.isCenteredLabels()) { + if (center_y) { if (!space) space = Math.min(gry - grminy, grmaxy - gry); text3d.gry += space/2; } + if (rotate_y) + text3d.rotate = 1; } ticks.push(0, gry, 0, this.y_handle.ticksSize*(is_major ? -1 : -0.6), gry, 0); } @@ -1210,6 +1257,8 @@ function drawXYZ(toplevel, AxisPainter, opts) { text3d.offsetx = 1.6 * this.y_handle.titleOffset + (grmaxx - grminx) * 0.005; text3d.gry = (grminy + grmaxy)/2; // default position for centered title text3d.kind = 'title'; + if (this.y_handle.isRotateTitle()) + text3d.rotate = 2; lbls.push(text3d); } @@ -1217,22 +1266,32 @@ function drawXYZ(toplevel, AxisPainter, opts) { let yticksline, ycont = new THREE.Object3D(); ycont.position.set(grminx, 0, grminz); ycont.rotation.y = -1/4*Math.PI; + ycont.painter = this.y_handle; if (opts.draw) { yticksline = createLineSegments(ticks, getLineMaterial(this.y_handle, 'ticks')); ycont.add(yticksline); } lbls.forEach(lbl => { - const w = lbl.boundingBox.max.x - lbl.boundingBox.min.x, - posy = lbl.center ? lbl.gry + w/2 : (lbl.opposite ? 
grminy + w : grmaxy), - m = new THREE.Matrix4(); - // matrix to swap y and z scales and shift along z to its position - m.set(0, text_scale, 0, -maxtextheight*text_scale - this.y_handle.ticksSize - lbl.offsetx, + const dx = lbl.boundingBox.max.x - lbl.boundingBox.min.x, + dy = lbl.boundingBox.max.y - lbl.boundingBox.min.y, + w = (lbl.rotate === 1) ? dy : dx, + posx = -text_scale * (lbl.rotate === 1 ? maxtextwidth : maxtextheight) - this.y_handle.ticksSize - lbl.offsetx, + posy = lbl.center ? lbl.gry + w/2 : (lbl.opposite ? grminy + w : grmaxy), + m = new THREE.Matrix4(); + m.set(0, text_scale, 0, posx, -text_scale, 0, 0, posy, 0, 0, 1, 0, 0, 0, 0, 1); const mesh = new THREE.Mesh(lbl, getTextMaterial(this.y_handle, lbl.kind, lbl.color)); + if (lbl.rotate) + mesh.rotateZ(lbl.rotate * Math.PI / 2); + if (lbl.rotate === 1) + mesh.translateY(-dy); + if (lbl.rotate === 2) + mesh.translateX(-dx); + mesh.applyMatrix4(m); ycont.add(mesh); }); @@ -1245,19 +1304,31 @@ function drawXYZ(toplevel, AxisPainter, opts) { ycont = new THREE.Object3D(); ycont.position.set(grmaxx, 0, grminz); ycont.rotation.y = -3/4*Math.PI; + ycont.painter = this.y_handle; if (opts.draw) ycont.add(new THREE.LineSegments(yticksline.geometry, yticksline.material)); lbls.forEach(lbl => { - const w = lbl.boundingBox.max.x - lbl.boundingBox.min.x, - posy = lbl.center ? lbl.gry - w/2 : (lbl.opposite ? grminy : grmaxy - w), - m = new THREE.Matrix4(); - m.set(0, text_scale, 0, -maxtextheight*text_scale - this.y_handle.ticksSize - lbl.offsetx, + const dx = lbl.boundingBox.max.x - lbl.boundingBox.min.x, + dy = lbl.boundingBox.max.y - lbl.boundingBox.min.y, + w = (lbl.rotate === 1) ? dy : dx, + posx = -text_scale * (lbl.rotate === 1 ? maxtextwidth : maxtextheight) - this.y_handle.ticksSize - lbl.offsetx, + posy = lbl.center ? lbl.gry - w/2 : (lbl.opposite ? 
grminy : grmaxy - w), + m = new THREE.Matrix4(); + + m.set(0, text_scale, 0, posx, text_scale, 0, 0, posy, 0, 0, -1, 0, 0, 0, 0, 1); const mesh = new THREE.Mesh(lbl, getTextMaterial(this.y_handle, lbl.kind, lbl.color)); + if (lbl.rotate) + mesh.rotateZ(lbl.rotate * Math.PI / 2); + if (lbl.rotate === 1) + mesh.translateY(-dy); + if (lbl.rotate === 2) + mesh.translateX(-dx); + mesh.applyMatrix4(m); ycont.add(mesh); }); @@ -1271,6 +1342,9 @@ function drawXYZ(toplevel, AxisPainter, opts) { let zgridx = null, zgridy = null, lastmajorz = null, maxzlblwidth = 0; + const center_z = this.z_handle.isCenteredLabels(), + rotate_z = this.z_handle.isRotateLabels(); + if (this.size_z3d && opts.drawany) { zgridx = []; zgridy = []; } @@ -1282,14 +1356,14 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (lbl === null) { is_major = false; lbl = ''; } - if (is_major && lbl && opts.draw) { + if (is_major && lbl && opts.draw && (!center_z || !zticks.last_major())) { const mod = zticks.get_modifier(); if (mod?.fLabText) lbl = mod.fLabText; const text3d = createLatexGeometry(this, lbl, this.z_handle.labelsFont.size); text3d.computeBoundingBox(); const draw_width = text3d.boundingBox.max.x - text3d.boundingBox.min.x, - draw_height = text3d.boundingBox.max.y - text3d.boundingBox.min.y; + draw_height = text3d.boundingBox.max.y - text3d.boundingBox.min.y; text3d.translate(-draw_width, -draw_height/2, 0); if (mod?.fTextColor) text3d.color = this.getColor(mod.fTextColor); @@ -1351,10 +1425,12 @@ function drawXYZ(toplevel, AxisPainter, opts) { zcont.push(new THREE.Object3D()); lbls.forEach((lbl, indx) => { - const m = new THREE.Matrix4(); + const m = new THREE.Matrix4(), + dx = lbl.boundingBox.max.x - lbl.boundingBox.min.x; + let grz = lbl.grz; - if (this.z_handle.isCenteredLabels()) { + if (center_z) { if (indx < lbls.length - 1) grz = (grz + lbls[indx+1].grz) / 2; else if (indx > 0) @@ -1366,6 +1442,8 @@ function drawXYZ(toplevel, AxisPainter, opts) { 0, 0, 1, 0, 0, text_scale, 0, grz); const mesh = new THREE.Mesh(lbl, getTextMaterial(this.z_handle)); + if (rotate_z) + mesh.rotateZ(-Math.PI/2).translateX(dx/2); mesh.applyMatrix4(m); zcont[n].add(mesh); }); @@ -1373,16 +1451,19 @@ function drawXYZ(toplevel, AxisPainter, opts) { if (this.z_handle.fTitle && opts.draw) { const text3d = createLatexGeometry(this, this.z_handle.fTitle, this.z_handle.titleFont.size); text3d.computeBoundingBox(); - const draw_width = text3d.boundingBox.max.x - text3d.boundingBox.min.x, - posz = this.z_handle.titleCenter ? (grmaxz + grminz - draw_width)/2 : (this.z_handle.titleOpposite ? grminz : grmaxz - draw_width); + const dx = text3d.boundingBox.max.x - text3d.boundingBox.min.x, + dy = text3d.boundingBox.max.y - text3d.boundingBox.min.y, + rotate = this.z_handle.isRotateTitle(), + posz = this.z_handle.titleCenter ? (grmaxz + grminz - dx)/2 : (this.z_handle.titleOpposite ? grminz : grmaxz - dx) + (rotate ? dx : 0), + m = new THREE.Matrix4(); - text3d.rotateZ(Math.PI/2); - - const m = new THREE.Matrix4(); m.set(-text_scale, 0, 0, this.z_handle.ticksSize + (grmaxx - grminx) * 0.005 + maxzlblwidth + this.z_handle.titleOffset, 0, 0, 1, 0, 0, text_scale, 0, posz); const mesh = new THREE.Mesh(text3d, getTextMaterial(this.z_handle, 'title')); + mesh.rotateZ(Math.PI*(rotate ? 
1.5 : 0.5)); + if (rotate) mesh.translateY(-dy); + mesh.applyMatrix4(m); zcont[n].add(mesh); } @@ -1395,6 +1476,7 @@ function drawXYZ(toplevel, AxisPainter, opts) { zcont[n].zid = n + 2; top.add(zcont[n]); + zcont[n].painter = this.z_handle; } zcont[0].position.set(grminx, grmaxy, 0); diff --git a/js/modules/hist2d/RHistPainter.mjs b/js/modules/hist2d/RHistPainter.mjs index ac723ecb1da5c..99fde6ddab399 100644 --- a/js/modules/hist2d/RHistPainter.mjs +++ b/js/modules/hist2d/RHistPainter.mjs @@ -814,7 +814,8 @@ class RHistPainter extends RObjectPainter { } // find min/max values in selected range - this.maxbin = this.minbin = this.minposbin = null; + let is_first = true; + this.minposbin = 0; for (i = res.i1; i < res.i2; i += res.stepi) { for (j = res.j1; j < res.j2; j += res.stepj) { @@ -828,17 +829,21 @@ class RHistPainter extends RObjectPainter { if ((binz > 0) && ((binz < res.min) || (res.min === 0))) res.min = binz; binz = binz/binarea; } - if (this.maxbin === null) + if (is_first) { this.maxbin = this.minbin = binz; - else { + is_first = false; + } else { this.maxbin = Math.max(this.maxbin, binz); this.minbin = Math.min(this.minbin, binz); } - if (binz > 0) - if ((this.minposbin === null) || (binz < this.minposbin)) this.minposbin = binz; + if ((binz > 0) && ((this.minposbin === 0) || (binz < this.minposbin))) + this.minposbin = binz; } } + if (is_first) + this.maxbin = this.minbin = 0; + res.palette = pmain.getHistPalette(); if (res.palette) diff --git a/js/modules/hist2d/TGraphPainter.mjs b/js/modules/hist2d/TGraphPainter.mjs index e239d9dc5861f..668f089630d56 100644 --- a/js/modules/hist2d/TGraphPainter.mjs +++ b/js/modules/hist2d/TGraphPainter.mjs @@ -1011,7 +1011,7 @@ class TGraphPainter extends ObjectPainter { rect = { x1: -5, x2: 5, y1: -5, y2: 5 }; const matchx = (pnt.x >= d.grx1 + rect.x1) && (pnt.x <= d.grx1 + rect.x2), - matchy = (pnt.y >= d.gry1 + rect.y1) && (pnt.y <= d.gry1 + rect.y2); + matchy = (pnt.y >= d.gry1 + rect.y1) && (pnt.y <= d.gry1 + rect.y2); if (matchx && (matchy || (pnt.nproc > 1))) { best_dist2 = dist2; diff --git a/js/modules/hist2d/TH2Painter.mjs b/js/modules/hist2d/TH2Painter.mjs index 4ac1416eae223..27fa2668c41c1 100644 --- a/js/modules/hist2d/TH2Painter.mjs +++ b/js/modules/hist2d/TH2Painter.mjs @@ -782,7 +782,6 @@ class TH2Painter extends THistPainter { if (this.isMainPainter()) { switch (funcname) { case 'ToggleColor': return this.toggleColor(); - case 'ToggleColorZ': return this.toggleColz(); case 'Toggle3D': return this.toggleMode3D(); } } @@ -2489,7 +2488,7 @@ class TH2Painter extends THistPainter { if (this.maxbin > 0.7) factor = 0.7/this.maxbin; const nlevels = Math.round(handle.max - handle.min), - cntr = this.createContour((nlevels > 50) ? 50 : nlevels, this.minposbin, this.maxbin, this.minposbin); + cntr = this.createContour((nlevels > 50) ? 
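The bin-range scan above drops the null sentinels: an `is_first` flag seeds min/max from the first visited bin and `minposbin` defaults to 0, so an empty selection yields zeros instead of nulls leaking into later arithmetic. The same pattern in isolation:

   // sketch: min, max and smallest positive value without null sentinels
   function scanBins(values) {
      let is_first = true, minbin = 0, maxbin = 0, minposbin = 0;
      for (const v of values) {
         if (is_first) { minbin = maxbin = v; is_first = false; }
         else { minbin = Math.min(minbin, v); maxbin = Math.max(maxbin, v); }
         if ((v > 0) && ((minposbin === 0) || (v < minposbin))) minposbin = v;
      }
      return { minbin, maxbin, minposbin }; // all zeros for an empty range
   }

   console.log(scanBins([0, 3, -1, 2])); // { minbin: -1, maxbin: 3, minposbin: 2 }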
50 : nlevels, this.minposbin, this.maxbin, this.minposbin); // now start build for (i = handle.i1; i < handle.i2; ++i) { @@ -2688,11 +2687,12 @@ class TH2Painter extends THistPainter { if (this.options.Circular > 11) { for (let i = 0; i < nbins - 1; ++i) { for (let j = i+1; j < nbins; ++j) { - const cont = hist.getBinContent(i+1, j+1); - if (cont > 0) { - max_value = Math.max(max_value, cont); - if (!min_value || (cont < min_value)) min_value = cont; - } + const cont = hist.getBinContent(i+1, j+1); + if (cont > 0) { + max_value = Math.max(max_value, cont); + if (!min_value || (cont < min_value)) + min_value = cont; + } } } } @@ -2899,9 +2899,8 @@ class TH2Painter extends THistPainter { histo = this.getHisto(); return [this.getObjectHint(), - p.swapXY - ? 'y = ' + funcs.axisAsText('y', histo.fYaxis.GetBinLowEdge(p.bin+1)) - : 'x = ' + funcs.axisAsText('x', histo.fXaxis.GetBinLowEdge(p.bin+1)), + p.swapXY ? 'y = ' + funcs.axisAsText('y', histo.fYaxis.GetBinLowEdge(p.bin+1)) + : 'x = ' + funcs.axisAsText('x', histo.fXaxis.GetBinLowEdge(p.bin+1)), 'm-25% = ' + floatToString(p.fBoxDown, gStyle.fStatFormat), 'median = ' + floatToString(p.fMedian, gStyle.fStatFormat), 'm+25% = ' + floatToString(p.fBoxUp, gStyle.fStatFormat)]; @@ -3157,13 +3156,16 @@ class TH2Painter extends THistPainter { return null; } - const res = { name: histo.fName, title: histo.fTitle, - x: pnt.x, y: pnt.y, - color1: this.lineatt?.color ?? 'green', - color2: this.fillatt?.getFillColorAlt('blue') ?? 'blue', - lines: this.getBinTooltips(i, j), exact: true, menu: true }; + const res = { + name: histo.fName, title: histo.fTitle, + x: pnt.x, y: pnt.y, + color1: this.lineatt?.color ?? 'green', + color2: this.fillatt?.getFillColorAlt('blue') ?? 'blue', + lines: this.getBinTooltips(i, j), exact: true, menu: true + }; - if (this.options.Color) res.color2 = this.getHistPalette().getColor(colindx); + if (this.options.Color) + res.color2 = this.getHistPalette().getColor(colindx); if (pnt.disabled && !this.is_projection) { ttrect.remove(); diff --git a/js/modules/hist2d/THistPainter.mjs b/js/modules/hist2d/THistPainter.mjs index 09dc64672fbf7..395287a2eb7de 100644 --- a/js/modules/hist2d/THistPainter.mjs +++ b/js/modules/hist2d/THistPainter.mjs @@ -69,6 +69,9 @@ class THistDrawOptions { /** @summary Is palette can be used with current draw options */ canHavePalette() { + if (this.ndim === 3) + return this.BoxStyle === 12 || this.BoxStyle === 13 || this.GLBox === 12; + if (this.ndim !== 2) return false; @@ -163,7 +166,7 @@ class THistDrawOptions { if (d.check('OPTSTAT', true)) this.optstat = d.partAsInt(); if (d.check('OPTFIT', true)) this.optfit = d.partAsInt(); - if ((this.optstat || this.optstat) && histo?.TestBit(kNoStats)) + if ((this.optstat || this.optfit) && histo?.TestBit(kNoStats)) histo?.InvertBit(kNoStats); if (d.check('NOSTAT')) this.NoStat = true; @@ -1886,6 +1889,7 @@ class THistPainter extends ObjectPainter { case 'ToggleLogY': return fp.toggleAxisLog('y'); case 'ToggleLogZ': return fp.toggleAxisLog('z'); case 'ToggleStatBox': return getPromise(this.toggleStat()); + case 'ToggleColorZ': return this.toggleColz(); } return false; } @@ -2186,7 +2190,7 @@ class THistPainter extends ObjectPainter { // TODO: use weak reference (via pad list of painters and any kind of string) pal.$main_painter = this; - let arg = '', pr; + let arg = 'bring_stats_front', pr; if (postpone_draw) arg += ';postpone'; if (can_move && !this.do_redraw_palette) arg += ';can_move'; if (this.options.Cjust) arg += ';cjust'; @@ -2419,8 +2423,8 @@ class 
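drawColorPalette now always receives 'bring_stats_front' as part of a semicolon-separated option string, and the receiving side (the TPavePainter hunk further up) probes each flag with indexOf. A tiny sketch of both sides, using only the flag names that appear in the patch:

   // sketch: build and test the option string handed to the palette drawing
   function buildPaletteArg(opts) {
      let arg = 'bring_stats_front';        // always raise the stats box afterwards
      if (opts.postpone) arg += ';postpone';
      if (opts.can_move) arg += ';can_move';
      if (opts.cjust) arg += ';cjust';
      return arg;
   }

   function hasFlag(arg, name) {
      return (typeof arg === 'string') && (arg.indexOf(name) >= 0);
   }

   const arg = buildPaletteArg({ postpone: true, can_move: true, cjust: false });
   console.log(arg, hasFlag(arg, 'cjust')); // 'bring_stats_front;postpone;can_move' false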
THistPainter extends ObjectPainter { } // find min/max values in selected range - - this.maxbin = this.minbin = this.minposbin = null; + let is_first = true; + this.minposbin = 0; for (i = res.i1; i < res.i2; ++i) { for (j = res.j1; j < res.j2; ++j) { @@ -2430,20 +2434,24 @@ class THistPainter extends ObjectPainter { binarea = (res.grx[i+1]-res.grx[i])*(res.gry[j]-res.gry[j+1]); if (binarea <= 0) continue; res.max = Math.max(res.max, binz); - if ((binz > 0) && ((binz 0) && ((binz < res.min) || (res.min === 0))) res.min = binz; binz = binz/binarea; } - if (this.maxbin === null) + if (is_first) { this.maxbin = this.minbin = binz; - else { + is_first = false; + } else { this.maxbin = Math.max(this.maxbin, binz); this.minbin = Math.min(this.minbin, binz); } - if (binz > 0) - if ((this.minposbin === null) || (binz < this.minposbin)) this.minposbin = binz; + if ((binz > 0) && ((this.minposbin === 0) || (binz < this.minposbin))) + this.minposbin = binz; } } + if (is_first) + this.maxbin = this.minbin = 0; + // force recalculation of z levels this.fContour = null; diff --git a/main/src/hadd.cxx b/main/src/hadd.cxx index 26fb44f021172..933859f3941a6 100644 --- a/main/src/hadd.cxx +++ b/main/src/hadd.cxx @@ -427,8 +427,10 @@ int main( int argc, char **argv ) else newcomp = ROOT::RCompressionSetting::EDefaults::kUseCompiledDefault; delete firstInput; + fileMerger.SetMergeOptions(TString("first_source_compression")); } else { newcomp = ROOT::RCompressionSetting::EDefaults::kUseCompiledDefault; + fileMerger.SetMergeOptions(TString("default_compression")); } } if (verbosity > 1) { @@ -487,7 +489,7 @@ int main( int argc, char **argv ) } } merger.SetNotrees(noTrees); - merger.SetMergeOptions(cacheSize); + merger.SetMergeOptions(TString(merger.GetMergeOptions()) + " " + cacheSize); merger.SetIOFeatures(features); Bool_t status; if (append) diff --git a/math/matrix/inc/TVectorT.h b/math/matrix/inc/TVectorT.h index a2c44f9eddcec..75866dcc9fa8f 100644 --- a/math/matrix/inc/TVectorT.h +++ b/math/matrix/inc/TVectorT.h @@ -24,6 +24,8 @@ #include "TMatrixTSym.h" #include "TMatrixTSparse.h" +#include + template class TVectorT : public TObject { protected: @@ -76,6 +78,26 @@ template class TVectorT : public TObject { inline Element *GetMatrixArray () { return fElements; } inline const Element *GetMatrixArray () const { return fElements; } + // For compatibility with STL classes + inline std::size_t size() const { return fNrows; } + + inline Element *data() { return fElements; } + inline const Element *data() const { return fElements; } + + // Implicit conversion of TVectorT to std::span, both non-const and const + // version. Can be removed once the minimum C++ standard in C++20, because + // then it's enough to implement the contiguous_range and sized_range + // concepts. This should alredy be the case since data() and size() are + // available. 
+ inline operator std::span() + { + return std::span{data(), size()}; + } + inline operator std::span() const + { + return std::span{data(), size()}; + } + inline void Invalidate () { SetBit(kStatus); } inline void MakeValid () { ResetBit(kStatus); } inline Bool_t IsValid () const { return !TestBit(kStatus); } diff --git a/math/minuit2/src/CMakeLists.txt b/math/minuit2/src/CMakeLists.txt index ea09a7eb7089b..e4f8632bcb09f 100644 --- a/math/minuit2/src/CMakeLists.txt +++ b/math/minuit2/src/CMakeLists.txt @@ -33,7 +33,6 @@ set(MINUIT2_HEADERS FumiliStandardChi2FCN.h FumiliStandardMaximumLikelihoodFCN.h FunctionGradient.h - FunctionMinimizer.h FunctionMinimum.h GenericFunction.h GradientCalculator.h diff --git a/misc/minicern/src/cernlib.c b/misc/minicern/src/cernlib.c index b882db2d90d99..98f7282ce6034 100644 --- a/misc/minicern/src/cernlib.c +++ b/misc/minicern/src/cernlib.c @@ -172,8 +172,8 @@ int cfstati_(char *fname, int *info, int *lgname) #endif { struct stat buf; - char *ptname, *fchtak(); - int istat=-1, stat(); + char *ptname; + int istat = -1; ptname = fchtak(fname,*lgname); if (ptname == ((void *)0)) return -1; istat = stat(ptname, &buf); @@ -226,7 +226,7 @@ void cfopei_(int *lundes, int *medium, int *nwrec, int *mode, int *nbuf, char *ftext, int *astat, int *lgtx) #endif { - char *pttext, *fchtak(); + char *pttext; int flags = 0; int fildes; int perm; diff --git a/net/davix/src/RRawFileDavix.cxx b/net/davix/src/RRawFileDavix.cxx index 76373b9f4ccdc..da958fe0209d3 100644 --- a/net/davix/src/RRawFileDavix.cxx +++ b/net/davix/src/RRawFileDavix.cxx @@ -11,7 +11,9 @@ #include "ROOT/RRawFileDavix.hxx" +#include #include +#include "utils.h" #include #include @@ -28,7 +30,14 @@ namespace ROOT { namespace Internal { struct RDavixFileDes { - RDavixFileDes() : fd(nullptr), pos(&ctx) {} + + RDavixFileDes() : fd(nullptr), pos(&ctx) + { + // CA Check + const auto ca_check_local_str = gEnv->GetValue("Davix.GSI.CACheck", (const char *)"y"); + bool ca_check_local = isno(ca_check_local_str); + pars.setSSLCAcheck(ca_check_local); + } RDavixFileDes(const RDavixFileDes &) = delete; RDavixFileDes &operator=(const RDavixFileDes &) = delete; ~RDavixFileDes() = default; @@ -36,6 +45,7 @@ struct RDavixFileDes { DAVIX_FD *fd; Davix::Context ctx; Davix::DavPosix pos; + Davix::RequestParams pars; }; } // namespace Internal @@ -62,7 +72,7 @@ std::uint64_t ROOT::Internal::RRawFileDavix::GetSizeImpl() { struct stat buf; Davix::DavixError *err = nullptr; - if (fFileDes->pos.stat(nullptr, fUrl, &buf, &err) == -1) { + if (fFileDes->pos.stat(&fFileDes->pars, fUrl, &buf, &err) == -1) { throw std::runtime_error("Cannot determine size of '" + fUrl + "', error: " + err->getErrMsg()); } return buf.st_size; @@ -71,7 +81,7 @@ std::uint64_t ROOT::Internal::RRawFileDavix::GetSizeImpl() void ROOT::Internal::RRawFileDavix::OpenImpl() { Davix::DavixError *err = nullptr; - fFileDes->fd = fFileDes->pos.open(nullptr, fUrl, O_RDONLY, &err); + fFileDes->fd = fFileDes->pos.open(&fFileDes->pars, fUrl, O_RDONLY, &err); if (fFileDes->fd == nullptr) { throw std::runtime_error("Cannot open '" + fUrl + "', error: " + err->getErrMsg()); } diff --git a/net/davix/src/TDavixFile.cxx b/net/davix/src/TDavixFile.cxx index 1dd7e49dfe2ad..10c690d82f393 100644 --- a/net/davix/src/TDavixFile.cxx +++ b/net/davix/src/TDavixFile.cxx @@ -40,6 +40,7 @@ #include "TBase64.h" #include "TVirtualPerfStats.h" #include "TDavixFileInternal.h" +#include "utils.h" #include "snprintf.h" #include @@ -89,16 +90,6 @@ ROOT::Experimental::RLogChannel &TDavixLogChannel() 
//////////////////////////////////////////////////////////////////////////////// -bool isno(const char *str) -{ - if (!str) return false; - - if (!strcmp(str, "n") || !strcmp(str, "no") || !strcmp(str, "0") || !strcmp(str, "false")) return true; - - return false; - -} - bool strToBool(const char *str, bool defvalue) { if(!str) return defvalue; diff --git a/bindings/pyroot/pythonizations/src/PyROOTWrapper.h b/net/davix/src/utils.h similarity index 50% rename from bindings/pyroot/pythonizations/src/PyROOTWrapper.h rename to net/davix/src/utils.h index 72700835956e9..75ec7a9a14200 100644 --- a/bindings/pyroot/pythonizations/src/PyROOTWrapper.h +++ b/net/davix/src/utils.h @@ -1,27 +1,20 @@ -// Author: Enric Tejedor CERN 06/2018 -// Original PyROOT code by Wim Lavrijsen, LBL - /************************************************************************* - * Copyright (C) 1995-2018, Rene Brun and Fons Rademakers. * + * Copyright (C) 1995-2025, Rene Brun and Fons Rademakers. * * All rights reserved. * * * * For the licensing terms see $ROOTSYS/LICENSE. * * For the list of contributors see $ROOTSYS/README/CREDITS. * *************************************************************************/ -#ifndef PYROOT_ROOTWRAPPER_H -#define PYROOT_ROOTWRAPPER_H - -#include "Python.h" - -namespace PyROOT { - -// initialize ROOT -void Init(); +#include -// clean up all objects controlled by TMemoryRegulator -PyObject *ClearProxiedObjects(PyObject *self, PyObject *args); +inline bool isno(const char *str) +{ + if (!str) + return false; -} // namespace PyROOT + if (!strcmp(str, "n") || !strcmp(str, "no") || !strcmp(str, "0") || !strcmp(str, "false")) + return true; -#endif // !PYROOT_ROOTWRAPPER_H + return false; +} diff --git a/net/net/CMakeLists.txt b/net/net/CMakeLists.txt index f7612e60a416f..ff871b1934bf9 100644 --- a/net/net/CMakeLists.txt +++ b/net/net/CMakeLists.txt @@ -108,3 +108,5 @@ if(ssl) target_include_directories(Net PRIVATE ${OPENSSL_INCLUDE_DIR}) target_link_libraries(Net PRIVATE ${OPENSSL_LIBRARIES}) endif() + +ROOT_ADD_TEST_SUBDIRECTORY(test) \ No newline at end of file diff --git a/net/net/inc/TParallelMergingFile.h b/net/net/inc/TParallelMergingFile.h index a9c6447a1a909..18715c65e6f8e 100644 --- a/net/net/inc/TParallelMergingFile.h +++ b/net/net/inc/TParallelMergingFile.h @@ -55,6 +55,8 @@ class TParallelMergingFile : public TMemFile Int_t Write(const char *name=nullptr, Int_t opt=0, Int_t bufsiz=0) const override; void WriteStreamerInfo() override; + Int_t GetServerIdx() const { return fServerIdx; } + ClassDefOverride(TParallelMergingFile, 0); // TFile specialization that will semi-automatically upload its content to a merging server. 
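The isno() helper that used to be private to TDavixFile.cxx now lives in net/davix/src/utils.h so that RRawFileDavix can reuse it. Its contract, spelled out as a tiny self-check (only the four negative spellings count as "no"; a null pointer is treated as unset):

```cpp
// Behaviour of the shared helper shown above (net/davix/src/utils.h).
#include "utils.h"

#include <cassert>

int main()
{
   assert(isno("n") && isno("no") && isno("0") && isno("false"));
   assert(!isno("y"));      // anything else is not a negative answer
   assert(!isno(""));       // including the empty string
   assert(!isno(nullptr));  // a missing value is not a "no" either
   return 0;
}
```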
}; diff --git a/net/net/inc/TServerSocket.h b/net/net/inc/TServerSocket.h index 44cd8c9e0a93d..afe48b7d120d7 100644 --- a/net/net/inc/TServerSocket.h +++ b/net/net/inc/TServerSocket.h @@ -55,8 +55,8 @@ class TServerSocket : public TSocket { public: enum { kDefaultBacklog = 10 }; - TServerSocket(Int_t port, Bool_t reuse = kFALSE, Int_t backlog = kDefaultBacklog, - Int_t tcpwindowsize = -1); + TServerSocket(Int_t port, Bool_t reuse = kFALSE, Int_t backlog = kDefaultBacklog, Int_t tcpwindowsize = -1, + ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny); TServerSocket(const char *service, Bool_t reuse = kFALSE, Int_t backlog = kDefaultBacklog, Int_t tcpwindowsize = -1); virtual ~TServerSocket(); diff --git a/net/net/inc/TSocket.h b/net/net/inc/TSocket.h index 18d2658226156..6f4fcbee79339 100644 --- a/net/net/inc/TSocket.h +++ b/net/net/inc/TSocket.h @@ -72,7 +72,7 @@ friend class TSlave; // to be able to call SendHostAuth() TBits fBitsInfo; // bits array to mark TStreamerInfo classes already sent TList *fUUIDs; // list of TProcessIDs already sent through the socket - TVirtualMutex *fLastUsageMtx; // Protect last usage setting / reading + TVirtualMutex *fLastUsageMtx; // Protect last usage setting / reading TTimeStamp fLastUsage; // Time stamp of last usage static ULong64_t fgBytesRecv; // total bytes received by all socket objects diff --git a/net/net/src/TParallelMergingFile.cxx b/net/net/src/TParallelMergingFile.cxx index 9972209073082..ef94964d24ad6 100644 --- a/net/net/src/TParallelMergingFile.cxx +++ b/net/net/src/TParallelMergingFile.cxx @@ -77,20 +77,33 @@ Bool_t TParallelMergingFile::UploadAndReset() { // Open connection to server if (fSocket == 0) { - const char *host = fServerLocation.GetHost(); - Int_t port = fServerLocation.GetPort(); - if (host == 0 || host[0] == '\0') { - host = "localhost"; - } - if (port <= 0) { - port = 1095; - } - fSocket = new TSocket(host,port); - if (!fSocket->IsValid()) { - Error("UploadAndReset","Could not contact the server %s:%d\n",host,port); - delete fSocket; - fSocket = 0; - return kFALSE; + const char *path = fServerLocation.GetFile(); + if (path && strlen(path) > 0 && path[0] == '/') { + // UNIX domain socket + fSocket = new TSocket(path); + if (!fSocket->IsValid()) { + Error("UploadAndReset", "Could not contact the server %s\n", path); + delete fSocket; + fSocket = 0; + return kFALSE; + } + } else { + // TCP socket + const char *host = fServerLocation.GetHost(); + Int_t port = fServerLocation.GetPort(); + if (host == 0 || host[0] == '\0') { + host = "localhost"; + } + if (port <= 0) { + port = 1095; + } + fSocket = new TSocket(host, port); + if (!fSocket->IsValid()) { + Error("UploadAndReset", "Could not contact the server %s:%d\n", host, port); + delete fSocket; + fSocket = 0; + return kFALSE; + } } // Wait till we get the start message // server tells us who we are diff --git a/net/net/src/TServerSocket.cxx b/net/net/src/TServerSocket.cxx index 4628aec844935..cb9e678817236 100644 --- a/net/net/src/TServerSocket.cxx +++ b/net/net/src/TServerSocket.cxx @@ -62,6 +62,8 @@ static void SetAuthOpt(UChar_t &opt, UChar_t mod) /// Use tcpwindowsize to specify the size of the receive buffer, it has /// to be specified here to make sure the window scale option is set (for /// tcpwindowsize > 65KB and for platforms supporting window scaling). +/// The socketBindOption parameter allows to specify how the socket will be +/// bound. See the documentation of ESocketBindOption for the details. 
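UploadAndReset() above now distinguishes a UNIX-domain server location (file part starting with '/') from the usual host:port pair, which still defaults to localhost:1095. A rough client-side sketch of the TCP path, assuming a parallel merge server is already listening on that default; the mergedClient:// URL follows the ROOT net tutorials and the output file name is made up:

```cpp
// Sketch: client side of the parallel merging setup, TCP branch (server assumed on localhost:1095).
#include "TFile.h"
#include "TH1F.h"
#include "TParallelMergingFile.h"

#include <cstdio>

void mergeClient()
{
   // The plugin manager resolves mergedClient:// to a TParallelMergingFile.
   auto *file = dynamic_cast<TParallelMergingFile *>(
      TFile::Open("mergedClient://localhost:1095/output.root", "RECREATE"));
   if (!file)
      return;

   TH1F h("h", "example histogram", 100, -3., 3.);
   h.FillRandom("gaus", 1000);

   file->Write(); // triggers UploadAndReset(), which opens the socket lazily
   std::printf("server-assigned index: %d\n", file->GetServerIdx()); // new accessor added above
   file->Close();
}
```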
/// Use IsValid() to check the validity of the /// server socket. In case server socket is not valid use GetErrorCode() /// to obtain the specific error value. These values are: @@ -105,7 +107,7 @@ TServerSocket::TServerSocket(const char *service, Bool_t reuse, Int_t backlog, fService = service; int port = gSystem->GetServiceByName(service); if (port != -1) { - fSocket = gSystem->AnnounceTcpService(port, reuse, backlog, tcpwindowsize); + fSocket = gSystem->AnnounceTcpService(port, reuse, backlog, tcpwindowsize, ESocketBindOption::kInaddrLoopback); if (fSocket >= 0) { R__LOCKGUARD(gROOTMutex); gROOT->GetListOfSockets()->Add(this); @@ -125,6 +127,8 @@ TServerSocket::TServerSocket(const char *service, Bool_t reuse, Int_t backlog, /// Use tcpwindowsize to specify the size of the receive buffer, it has /// to be specified here to make sure the window scale option is set (for /// tcpwindowsize > 65KB and for platforms supporting window scaling). +/// The socketBindOption parameter allows to specify how the socket will be +/// bound. See the documentation of ESocketBindOption for the details. /// Use IsValid() to check the validity of the /// server socket. In case server socket is not valid use GetErrorCode() /// to obtain the specific error value. These values are: @@ -136,8 +140,8 @@ TServerSocket::TServerSocket(const char *service, Bool_t reuse, Int_t backlog, /// will make sure that any open sockets are properly closed on /// program termination. -TServerSocket::TServerSocket(Int_t port, Bool_t reuse, Int_t backlog, - Int_t tcpwindowsize) +TServerSocket::TServerSocket(Int_t port, Bool_t reuse, Int_t backlog, Int_t tcpwindowsize, + ESocketBindOption socketBindOption) { R__ASSERT(gROOT); R__ASSERT(gSystem); @@ -149,7 +153,7 @@ TServerSocket::TServerSocket(Int_t port, Bool_t reuse, Int_t backlog, fService = gSystem->GetServiceByPort(port); SetTitle(fService); - fSocket = gSystem->AnnounceTcpService(port, reuse, backlog, tcpwindowsize); + fSocket = gSystem->AnnounceTcpService(port, reuse, backlog, tcpwindowsize, socketBindOption); if (fSocket >= 0) { R__LOCKGUARD(gROOTMutex); gROOT->GetListOfSockets()->Add(this); diff --git a/net/net/test/CMakeLists.txt b/net/net/test/CMakeLists.txt new file mode 100644 index 0000000000000..83fe3a4ea963d --- /dev/null +++ b/net/net/test/CMakeLists.txt @@ -0,0 +1,7 @@ +# Copyright (C) 1995-2025, Rene Brun and Fons Rademakers. +# All rights reserved. +# +# For the licensing terms see $ROOTSYS/LICENSE. +# For the list of contributors see $ROOTSYS/README/CREDITS. 
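Both constructor overloads now document the new socketBindOption argument; the default ESocketBindOption::kInaddrAny keeps the old bind-to-all-interfaces behaviour, while kInaddrLoopback restricts the listener to 127.0.0.1. A short usage sketch (port 0 asks the system for a free port, as in the new unit test below):

```cpp
// Sketch: open a server socket that only accepts connections from the local host.
#include "TServerSocket.h"

#include <cstdio>

void loopbackServer()
{
   TServerSocket srv(0, /*reuse=*/kTRUE, TServerSocket::kDefaultBacklog,
                     /*tcpwindowsize=*/-1, ESocketBindOption::kInaddrLoopback);
   if (!srv.IsValid()) {
      std::printf("bind failed, error code %d\n", srv.GetErrorCode());
      return;
   }
   std::printf("listening on %s:%d\n", srv.GetLocalInetAddress().GetHostAddress(), srv.GetLocalPort());
   // srv.Accept() now only sees clients that connect via 127.0.0.1.
}
```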
+ +ROOT_ADD_GTEST(nettests nettests.cxx LIBRARIES Net) diff --git a/net/net/test/nettests.cxx b/net/net/test/nettests.cxx new file mode 100644 index 0000000000000..f4801f3f988c4 --- /dev/null +++ b/net/net/test/nettests.cxx @@ -0,0 +1,14 @@ +#include "TServerSocket.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +TEST(TServerSocket, SocketBinding) +{ + // The port is 0 to let the system pick a free port for this test + TServerSocket theSocket(0, false, TServerSocket::kDefaultBacklog, -1, ESocketBindOption::kInaddrLoopback); + const auto addr = theSocket.GetLocalInetAddress().GetHostAddress(); + const auto expectedAddr = "0.0.0.0"; + ASSERT_THAT(addr, ::testing::StrNe(expectedAddr)) + << "The address is " << addr << " but it must be different from " << expectedAddr; +} diff --git a/roofit/CMakeLists.txt b/roofit/CMakeLists.txt index 791a77b842206..57b460722c7b4 100644 --- a/roofit/CMakeLists.txt +++ b/roofit/CMakeLists.txt @@ -24,10 +24,6 @@ if(roofit_legacy_eval_backend AND NOT MSVC) add_subdirectory(xroofit) endif() -generateManual(hist2workspaceMan - ${CMAKE_CURRENT_SOURCE_DIR}/histfactory/src/hist2workspace-argparse.py - ${CMAKE_BINARY_DIR}/man/hist2workspace.1) - set(roofit_etc_files etc/HistFactorySchema.dtd etc/RooFitHS3_wsfactoryexpressions.json diff --git a/roofit/histfactory/CMakeLists.txt b/roofit/histfactory/CMakeLists.txt index 3712191265e97..b483a21e8440e 100644 --- a/roofit/histfactory/CMakeLists.txt +++ b/roofit/histfactory/CMakeLists.txt @@ -54,7 +54,6 @@ ROOT_STANDARD_LIBRARY_PACKAGE(HistFactory src/HistFactoryNavigation.cxx src/HistRef.cxx src/HistoToWorkspaceFactoryFast.cxx - src/JSONTool.cxx src/LinInterpVar.cxx src/MakeModelAndMeasurementsFast.cxx src/Measurement.cxx @@ -81,7 +80,6 @@ ROOT_STANDARD_LIBRARY_PACKAGE(HistFactory Graf Gpad RooStats - RooFitJSONInterface ${EXTRA_DICT_OPTS} ) @@ -96,13 +94,6 @@ endif() # can't build hist2workspace without xml.
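The SocketBinding test above only pins down the loopback case. A complementary check for the default kInaddrAny option could look like the following sketch (not part of this patch; it assumes the wildcard address is reported as 0.0.0.0, which is what the test above relies on implicitly):

```cpp
// Hypothetical companion test: the default bind option should expose the wildcard address.
#include "TServerSocket.h"

#include "gtest/gtest.h"

TEST(TServerSocket, SocketBindingAny)
{
   // Port 0 lets the system pick a free port, as in the loopback test above.
   TServerSocket theSocket(0, false, TServerSocket::kDefaultBacklog, -1, ESocketBindOption::kInaddrAny);
   ASSERT_TRUE(theSocket.IsValid());
   EXPECT_STREQ("0.0.0.0", theSocket.GetLocalInetAddress().GetHostAddress());
}
```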
if(xml) ROOT_EXECUTABLE(hist2workspace hist2workspace.cxx LIBRARIES HistFactory) - - #---Createhist2workspaceCommandLineOptions------------------------------------------------------------------ - generateHeader(hist2workspace - ${CMAKE_CURRENT_SOURCE_DIR}/src/hist2workspace-argparse.py - ${CMAKE_BINARY_DIR}/ginclude/hist2workspaceCommandLineOptionsHelp.h - ) - target_compile_definitions(HistFactory PUBLIC HISTFACTORY_XML) endif() diff --git a/roofit/histfactory/inc/RooStats/HistFactory/FlexibleInterpVar.h b/roofit/histfactory/inc/RooStats/HistFactory/FlexibleInterpVar.h index 7fbdf1ddacb30..46285e2f60b68 100644 --- a/roofit/histfactory/inc/RooStats/HistFactory/FlexibleInterpVar.h +++ b/roofit/histfactory/inc/RooStats/HistFactory/FlexibleInterpVar.h @@ -71,6 +71,10 @@ namespace HistFactory{ double evaluate() const override; + private: + + void setInterpCodeForParam(int iParam, int code); + ClassDefOverride(RooStats::HistFactory::FlexibleInterpVar,2); // flexible interpolation }; } diff --git a/roofit/histfactory/inc/RooStats/HistFactory/PiecewiseInterpolation.h b/roofit/histfactory/inc/RooStats/HistFactory/PiecewiseInterpolation.h index cd47e4dcd12c4..5676b7f4d1c1d 100644 --- a/roofit/histfactory/inc/RooStats/HistFactory/PiecewiseInterpolation.h +++ b/roofit/histfactory/inc/RooStats/HistFactory/PiecewiseInterpolation.h @@ -61,7 +61,7 @@ class PiecewiseInterpolation : public RooAbsReal { void setPositiveDefinite(bool flag=true){_positiveDefinite=flag;} bool positiveDefinite() const {return _positiveDefinite;} - void setInterpCode(RooAbsReal& param, int code, bool silent=false); + void setInterpCode(RooAbsReal& param, int code, bool silent=true); void setAllInterpCodes(int code); void printAllInterpCodes(); @@ -102,6 +102,10 @@ class PiecewiseInterpolation : public RooAbsReal { double evaluate() const override; void doEval(RooFit::EvalContext &) const override; +private: + + void setInterpCodeForParam(int iParam, int code); + ClassDefOverride(PiecewiseInterpolation,4) // Sum of RooAbsReal objects }; diff --git a/roofit/histfactory/src/FlexibleInterpVar.cxx b/roofit/histfactory/src/FlexibleInterpVar.cxx index 11c97bdacec02..d65dfd2e64d01 100644 --- a/roofit/histfactory/src/FlexibleInterpVar.cxx +++ b/roofit/histfactory/src/FlexibleInterpVar.cxx @@ -54,10 +54,10 @@ FlexibleInterpVar::FlexibleInterpVar(const char* name, const char* title, FlexibleInterpVar::FlexibleInterpVar(const char* name, const char* title, const RooArgList& paramList, double argNominal, std::vector const& lowVec, std::vector const& highVec, - std::vector const& code) : + std::vector const& codes) : RooAbsReal(name, title), _paramList("paramList","List of paramficients",this), - _nominal(argNominal), _low(lowVec), _high(highVec), _interpCode(code) + _nominal(argNominal), _low(lowVec), _high(highVec) { for (auto param : paramList) { if (!dynamic_cast(param)) { @@ -69,6 +69,11 @@ FlexibleInterpVar::FlexibleInterpVar(const char* name, const char* title, _paramList.add(*param) ; } + _interpCode.resize(_paramList.size()); + for (std::size_t i = 0; i < codes.size(); ++i) { + setInterpCodeForParam(i, codes[i]); + } + if (_low.size() != _paramList.size() || _low.size() != _high.size() || _low.size() != _interpCode.size()) { coutE(InputArguments) << "FlexibleInterpVar::ctor(" << GetName() << ") invalid input std::vectors " << std::endl; R__ASSERT(_low.size() == _paramList.size()); @@ -109,31 +114,43 @@ FlexibleInterpVar::~FlexibleInterpVar() TRACE_DESTROY; } - 
-//////////////////////////////////////////////////////////////////////////////// - -void FlexibleInterpVar::setInterpCode(RooAbsReal& param, int code){ - int index = _paramList.index(¶m); - if(index<0){ - coutE(InputArguments) << "FlexibleInterpVar::setInterpCode ERROR: " << param.GetName() - << " is not in list" << std::endl; - } else if(_interpCode.at(index) != code){ - coutI(InputArguments) << "FlexibleInterpVar::setInterpCode : " << param.GetName() - << " is now " << code << std::endl; - _interpCode.at(index) = code; - // GHL: Adding suggestion by Swagato: - setValueDirty(); - } +void FlexibleInterpVar::setInterpCode(RooAbsReal ¶m, int code) +{ + int index = _paramList.index(¶m); + if (index < 0) { + coutE(InputArguments) << "FlexibleInterpVar::setInterpCode ERROR: " << param.GetName() << " is not in list" + << std::endl; + return; + } + setInterpCodeForParam(index, code); } -//////////////////////////////////////////////////////////////////////////////// +void FlexibleInterpVar::setAllInterpCodes(int code) +{ + for (std::size_t i = 0; i < _interpCode.size(); ++i) { + setInterpCodeForParam(i, code); + } +} -void FlexibleInterpVar::setAllInterpCodes(int code){ - for(unsigned int i=0; i<_interpCode.size(); ++i){ - _interpCode.at(i) = code; - } - // GHL: Adding suggestion by Swagato: - setValueDirty(); +void FlexibleInterpVar::setInterpCodeForParam(int iParam, int code) +{ + RooAbsArg const ¶m = _paramList[iParam]; + if (code < 0 || code > 5) { + coutE(InputArguments) << "FlexibleInterpVar::setInterpCode ERROR: " << param.GetName() + << " with unknown interpolation code " << code << ", keeping current code " + << _interpCode[iParam] << std::endl; + return; + } + if (code == 3) { + // In the past, code 3 was equivalent to code 2, which confused users. + // Now, we just say that code 3 doesn't exist and default to code 2 in + // that case for backwards compatible behavior. 
+ coutE(InputArguments) << "FlexibleInterpVar::setInterpCode ERROR: " << param.GetName() + << " with unknown interpolation code " << code << ", defaulting to code 2" << std::endl; + code = 2; + } + _interpCode.at(iParam) = code; + setValueDirty(); } //////////////////////////////////////////////////////////////////////////////// @@ -198,10 +215,6 @@ double FlexibleInterpVar::evaluate() const double total(_nominal); for (std::size_t i = 0; i < _paramList.size(); ++i) { int code = _interpCode[i]; - if (code < 0 || code > 4) { - coutE(InputArguments) << "FlexibleInterpVar::evaluate ERROR: param " << i - << " with unknown interpolation code" << std::endl; - } // To get consistent codes with the PiecewiseInterpolation if (code == 4) { code = 5; @@ -223,10 +236,6 @@ void FlexibleInterpVar::translate(RooFit::Detail::CodeSquashContext &ctx) const unsigned int n = _interpCode.size(); int interpCode = _interpCode[0]; - if (interpCode < 0 || interpCode > 4) { - coutE(InputArguments) << "FlexibleInterpVar::evaluate ERROR: param " << 0 - << " with unknown interpolation code" << std::endl; - } // To get consistent codes with the PiecewiseInterpolation if (interpCode == 4) { interpCode = 5; @@ -251,10 +260,6 @@ void FlexibleInterpVar::doEval(RooFit::EvalContext &ctx) const for (std::size_t i = 0; i < _paramList.size(); ++i) { int code = _interpCode[i]; - if (code < 0 || code > 4) { - coutE(InputArguments) << "FlexibleInterpVar::evaluate ERROR: param " << i - << " with unknown interpolation code" << std::endl; - } // To get consistent codes with the PiecewiseInterpolation if (code == 4) { code = 5; diff --git a/roofit/histfactory/src/HistoToWorkspaceFactoryFast.cxx b/roofit/histfactory/src/HistoToWorkspaceFactoryFast.cxx index ef2eb70aacd18..d14eda1391097 100644 --- a/roofit/histfactory/src/HistoToWorkspaceFactoryFast.cxx +++ b/roofit/histfactory/src/HistoToWorkspaceFactoryFast.cxx @@ -603,7 +603,7 @@ RooArgList HistoToWorkspaceFactoryFast::createObservables(const TH1 *hist, RooWo assert(lowVec.size() == params.size()); FlexibleInterpVar interp( (interpName).c_str(), "", params, 1., lowVec, highVec); - interp.setAllInterpCodes(4); // LM: change to 4 (piece-wise linear to 6th order polynomial interpolation + linear extrapolation ) + interp.setAllInterpCodes(4); // LM: change to 4 (piece-wise exponential to 6th order polynomial interpolation + exponential extrapolation ) //interp.setAllInterpCodes(0); // simple linear interpolation proto.import(interp); // params have already been imported in first loop of this function } else{ diff --git a/roofit/histfactory/src/JSONTool.cxx b/roofit/histfactory/src/JSONTool.cxx deleted file mode 100644 index 9ec151c1d7c93..0000000000000 --- a/roofit/histfactory/src/JSONTool.cxx +++ /dev/null @@ -1,514 +0,0 @@ -/// \cond ROOFIT_INTERNAL - -/* - * Project: RooFit - * Authors: - * Carsten D. Burgard, DESY/ATLAS, Dec 2021 - * - * Copyright (c) 2022, CERN - * - * Redistribution and use in source and binary forms, - * with or without modification, are permitted according to the terms - * listed in LICENSE (http://roofit.sourceforge.net/license.txt) - */ - -/** \class RooStats::HistFactory::JSONTool - * \ingroup HistFactory -The RooStats::HistFactory::JSONTool can be used to export a HistFactory -measurement to HS3 JSON. It is not part of the public user interface, but a -pretty useful tool for unit test, validating if a measurement object can be -directly translated to HS3 without going over RooFit. 
If this translation turns -out to be important for users, it can be considered in the future to make the -class part of the public interface. -*/ - -#include "./JSONTool.h" - -#include - -#include -#include -#include - -#include -#include - -using RooFit::Detail::JSONNode; - -namespace { - -JSONNode &appendNamedChild(JSONNode &node, std::string const &name) -{ - static constexpr bool useListsInsteadOfDicts = true; - - if (!useListsInsteadOfDicts) { - return node.set_map()[name].set_map(); - } - JSONNode &child = node.set_seq().append_child().set_map(); - child["name"] << name; - return child; -} - -class Domains { -public: - void readVariable(const char *name, double min, double max); - - void writeJSON(RooFit::Detail::JSONNode &) const; - -private: - class ProductDomain { - public: - void readVariable(const char *name, double min, double max); - - void writeJSON(RooFit::Detail::JSONNode &) const; - - private: - struct ProductDomainElement { - double min = 0.0; - double max = 0.0; - }; - - std::map _map; - }; - - std::map _map; -}; - -void Domains::readVariable(const char *name, double min, double max) -{ - _map["default_domain"].readVariable(name, min, max); -} - -void Domains::writeJSON(RooFit::Detail::JSONNode &node) const -{ - for (auto const &domain : _map) { - domain.second.writeJSON(appendNamedChild(node, domain.first)); - } -} -void Domains::ProductDomain::readVariable(const char *name, double min, double max) -{ - auto &elem = _map[name]; - - elem.min = min; - elem.max = max; -} -void Domains::ProductDomain::writeJSON(RooFit::Detail::JSONNode &node) const -{ - node.set_map(); - node["type"] << "product_domain"; - - auto &variablesNode = node["axes"]; - - for (auto const &item : _map) { - auto const &elem = item.second; - RooFit::Detail::JSONNode &varnode = appendNamedChild(variablesNode, item.first); - varnode["min"] << elem.min; - varnode["max"] << elem.max; - } -} - -bool checkRegularBins(const TAxis &ax) -{ - double w = ax.GetXmax() - ax.GetXmin(); - double bw = w / ax.GetNbins(); - for (int i = 0; i <= ax.GetNbins(); ++i) { - if (std::abs(ax.GetBinUpEdge(i) - (ax.GetXmin() + (bw * i))) > w * 1e-6) - return false; - } - return true; -} - -inline void writeAxis(JSONNode &axis, const TAxis &ax) -{ - bool regular = (!ax.IsVariableBinSize()) || checkRegularBins(ax); - axis.set_map(); - if (regular) { - axis["nbins"] << ax.GetNbins(); - axis["min"] << ax.GetXmin(); - axis["max"] << ax.GetXmax(); - } else { - auto &edges = axis["edges"]; - edges.set_seq(); - for (int i = 0; i <= ax.GetNbins(); ++i) { - edges.append_child() << ax.GetBinUpEdge(i); - } - } -} - -std::vector getObsnames(RooStats::HistFactory::Channel const &c) -{ - std::vector obsnames{"obs_x_" + c.GetName(), "obs_y_" + c.GetName(), "obs_z_" + c.GetName()}; - obsnames.resize(c.GetData().GetHisto()->GetDimension()); - return obsnames; -} - -void writeObservables(const TH1 &h, JSONNode &node, const std::vector &varnames) -{ - // axes need to be ordered, so this is a sequence and not a map - auto &observables = node["axes"].set_seq(); - auto &x = observables.append_child().set_map(); - x["name"] << varnames[0]; - writeAxis(x, *h.GetXaxis()); - if (h.GetDimension() > 1) { - auto &y = observables.append_child().set_map(); - y["name"] << varnames[1]; - writeAxis(y, *(h.GetYaxis())); - if (h.GetDimension() > 2) { - auto &z = observables.append_child().set_map(); - z["name"] << varnames[2]; - writeAxis(z, *(h.GetZaxis())); - } - } -} - -void exportSimpleHistogram(const TH1 &histo, JSONNode &node) -{ - node.set_seq(); - const int 
nBins = histo.GetNbinsX() * histo.GetNbinsY() * histo.GetNbinsZ(); - for (int i = 1; i <= nBins; ++i) { - const double val = histo.GetBinContent(i); - node.append_child() << val; - } -} - -void exportHistogram(const TH1 &histo, JSONNode &node, const std::vector &varnames, - const TH1 *errH = nullptr, bool doWriteObservables = true, bool writeErrors = true) -{ - node.set_map(); - auto &weights = node["contents"].set_seq(); - JSONNode *errors = nullptr; - if (writeErrors) { - errors = &node["errors"].set_seq(); - } - if (doWriteObservables) { - writeObservables(histo, node, varnames); - } - const int nBins = histo.GetNbinsX() * histo.GetNbinsY() * histo.GetNbinsZ(); - for (int i = 1; i <= nBins; ++i) { - const double val = histo.GetBinContent(i); - weights.append_child() << val; - if (writeErrors) { - const double err = errH ? val * errH->GetBinContent(i) : histo.GetBinError(i); - errors->append_child() << err; - } - } -} - -void exportSample(const RooStats::HistFactory::Sample &sample, JSONNode &channelNode, - std::vector const &obsnames) -{ - auto &s = appendNamedChild(channelNode["samples"], sample.GetName()); - - if (!sample.GetOverallSysList().empty()) { - auto &modifiers = s["modifiers"].set_seq(); - for (const auto &sys : sample.GetOverallSysList()) { - auto &node = modifiers.append_child().set_map(); - node["name"] << sys.GetName(); - node["type"] << "normsys"; - auto &data = node["data"]; - data.set_map(); - data["lo"] << sys.GetLow(); - data["hi"] << sys.GetHigh(); - } - } - - if (!sample.GetNormFactorList().empty()) { - auto &modifiers = s["modifiers"].set_seq(); - for (const auto &nf : sample.GetNormFactorList()) { - auto &mod = modifiers.append_child().set_map(); - mod["name"] << nf.GetName(); - mod["type"] << "normfactor"; - } - auto &mod = modifiers.append_child().set_map(); - mod["name"] << "Lumi"; - mod["type"] << "normfactor"; - mod["constraint_name"] << "lumiConstraint"; - } - - if (!sample.GetHistoSysList().empty()) { - auto &modifiers = s["modifiers"].set_seq(); - for (size_t i = 0; i < sample.GetHistoSysList().size(); ++i) { - auto &sys = sample.GetHistoSysList()[i]; - auto &node = modifiers.append_child().set_map(); - node["name"] << sys.GetName(); - node["type"] << "histosys"; - auto &data = node["data"].set_map(); - exportSimpleHistogram(*sys.GetHistoLow(), data["lo"].set_map()["contents"]); - exportSimpleHistogram(*sys.GetHistoHigh(), data["hi"].set_map()["contents"]); - } - } - - if (!sample.GetShapeSysList().empty()) { - auto &modifiers = s["modifiers"].set_seq(); - for (size_t i = 0; i < sample.GetShapeSysList().size(); ++i) { - auto &sys = sample.GetShapeSysList()[i]; - auto &node = modifiers.append_child().set_map(); - node["name"] << sys.GetName(); - node["type"] << "shapesys"; - if (sys.GetConstraintType() == RooStats::HistFactory::Constraint::Gaussian) - node["constraint"] << "Gauss"; - if (sys.GetConstraintType() == RooStats::HistFactory::Constraint::Poisson) - node["constraint"] << "Poisson"; - auto &data = node["data"].set_map(); - exportSimpleHistogram(*sys.GetErrorHist(), data["vals"]); - } - } - - auto &tags = s["dict"].set_map(); - tags["normalizeByTheory"] << sample.GetNormalizeByTheory(); - - if (sample.GetStatError().GetActivate()) { - RooStats::HistFactory::JSONTool::activateStatError(s); - } - - auto &data = s["data"]; - const bool useStatError = sample.GetStatError().GetActivate() && sample.GetStatError().GetUseHisto(); - TH1 const *errH = useStatError ? 
sample.GetStatError().GetErrorHist() : nullptr; - - if (!channelNode.has_child("axes")) { - writeObservables(*sample.GetHisto(), channelNode, obsnames); - } - exportHistogram(*sample.GetHisto(), data, obsnames, errH, false); -} - -void exportChannel(const RooStats::HistFactory::Channel &c, JSONNode &ch) -{ - ch["type"] << "histfactory_dist"; - - auto &staterr = ch["statError"].set_map(); - staterr["relThreshold"] << c.GetStatErrorConfig().GetRelErrorThreshold(); - staterr["constraint"] << RooStats::HistFactory::Constraint::Name(c.GetStatErrorConfig().GetConstraintType()); - - const std::vector obsnames = getObsnames(c); - - for (const auto &s : c.GetSamples()) { - exportSample(s, ch, obsnames); - } -} - -void setAttribute(JSONNode &rootnode, const std::string &obj, const std::string &attrib) -{ - auto node = &rootnode.get("misc", "ROOT_internal", "attributes").set_map()[obj].set_map(); - auto &tags = (*node)["tags"]; - tags.set_seq(); - tags.append_child() << attrib; -} - -void exportMeasurement(RooStats::HistFactory::Measurement &measurement, JSONNode &rootnode, Domains &domains) -{ - using namespace RooStats::HistFactory; - - for (const auto &ch : measurement.GetChannels()) { - if (!ch.CheckHistograms()) - throw std::runtime_error("unable to export histograms, please call CollectHistograms first"); - } - - // preprocess functions - if (!measurement.GetFunctionObjects().empty()) { - auto &funclist = rootnode["functions"]; - for (const auto &func : measurement.GetFunctionObjects()) { - auto &f = appendNamedChild(funclist, func.GetName()); - f["name"] << func.GetName(); - f["expression"] << func.GetExpression(); - f["dependents"] << func.GetDependents(); - f["command"] << func.GetCommand(); - } - } - - auto &pdflist = rootnode["distributions"]; - - auto &analysisNode = appendNamedChild(rootnode["analyses"], "simPdf"); - analysisNode["domains"].set_seq().append_child() << "default_domain"; - - auto &analysisPois = analysisNode["parameters_of_interest"].set_seq(); - - for (const auto &poi : measurement.GetPOIList()) { - analysisPois.append_child() << poi; - } - - analysisNode["likelihood"] << measurement.GetName(); - - auto &likelihoodNode = appendNamedChild(rootnode["likelihoods"], measurement.GetName()); - likelihoodNode["distributions"].set_seq(); - likelihoodNode["data"].set_seq(); - - // the simpdf - for (const auto &c : measurement.GetChannels()) { - - auto pdfName = std::string("model_") + c.GetName(); - auto realSumPdfName = c.GetName() + std::string("_model"); - - likelihoodNode["distributions"].append_child() << pdfName; - likelihoodNode["data"].append_child() << std::string("obsData_") + c.GetName(); - exportChannel(c, appendNamedChild(pdflist, pdfName)); - setAttribute(rootnode, realSumPdfName, "BinnedLikelihood"); - } - - struct VariableInfo { - double val = 0.0; - double minVal = -5.0; - double maxVal = 5.0; - bool isConstant = false; - bool writeDomain = true; - }; - std::unordered_map variables; - - for (const auto &channel : measurement.GetChannels()) { - for (const auto &sample : channel.GetSamples()) { - for (const auto &norm : sample.GetNormFactorList()) { - auto &info = variables[norm.GetName()]; - info.val = norm.GetVal(); - info.minVal = norm.GetLow(); - info.maxVal = norm.GetHigh(); - } - for (const auto &sys : sample.GetOverallSysList()) { - variables[std::string("alpha_") + sys.GetName()] = VariableInfo{}; - } - } - } - for (const auto &sys : measurement.GetConstantParams()) { - auto &info = variables[sys]; - info.isConstant = true; - bool isGamma = 
sys.find("gamma_") != std::string::npos; - // Gammas are 1.0 by default, alphas are 0.0 - info.val = isGamma ? 1.0 : 0.0; - // For the gamma parameters, HistFactory will figure out the ranges - // itself based on the template bin contents and errors. - info.writeDomain = !isGamma; - } - - // the lumi variables - { - double nominal = measurement.GetLumi(); - double error = measurement.GetLumi() * measurement.GetLumiRelErr(); - - auto &info1 = variables["Lumi"]; - info1.val = nominal; - info1.minVal = 0; - info1.maxVal = 10 * nominal; - info1.isConstant = true; - - auto &info2 = variables["nominalLumi"]; - info2.val = nominal; - info2.minVal = 0; - info2.maxVal = nominal + 10 * error; - info2.isConstant = true; - } - - JSONNode &varlist = appendNamedChild(rootnode["parameter_points"], "default_values")["parameters"]; - for (auto const &item : variables) { - std::string const &parname = item.first; - VariableInfo const &info = item.second; - - auto &v = appendNamedChild(varlist, parname); - v["value"] << info.val; - if (info.isConstant) - v["const"] << true; - if (info.writeDomain) { - domains.readVariable(parname.c_str(), info.minVal, info.maxVal); - } - } - - // the data - auto &child1 = rootnode.get("misc", "ROOT_internal", "combined_datasets").set_map()["obsData"].set_map(); - auto &child2 = rootnode.get("misc", "ROOT_internal", "combined_distributions").set_map()["simPdf"].set_map(); - - child1["index_cat"] << "channelCat"; - auto &labels1 = child1["labels"].set_seq(); - auto &indices1 = child1["indices"].set_seq(); - - child2["index_cat"] << "channelCat"; - auto &labels2 = child2["labels"].set_seq(); - auto &indices2 = child2["indices"].set_seq(); - auto &pdfs2 = child2["distributions"].set_seq(); - - std::vector channelNames; - for (const auto &c : measurement.GetChannels()) { - labels1.append_child() << c.GetName(); - indices1.append_child() << int(channelNames.size()); - labels2.append_child() << c.GetName(); - indices2.append_child() << int(channelNames.size()); - pdfs2.append_child() << (std::string("model_") + c.GetName()); - - JSONNode &dataOutput = appendNamedChild(rootnode["data"], std::string("obsData_") + c.GetName()); - dataOutput["type"] << "binned"; - - exportHistogram(*c.GetData().GetHisto(), dataOutput, getObsnames(c)); - channelNames.push_back(c.GetName()); - } - - auto &modelConfigAux = rootnode.get("misc", "ROOT_internal", "ModelConfigs", "simPdf").set_map(); - modelConfigAux["combined_data_name"] << "obsData"; - modelConfigAux["pdfName"] << "simPdf"; - modelConfigAux["mcName"] << "ModelConfig"; - - // Finally write lumi constraint - auto &lumiConstraint = appendNamedChild(pdflist, "lumiConstraint"); - lumiConstraint["mean"] << "nominalLumi"; - lumiConstraint["sigma"] << (measurement.GetLumi() * measurement.GetLumiRelErr()); - lumiConstraint["type"] << "gaussian_dist"; - lumiConstraint["x"] << "Lumi"; -} - -std::unique_ptr createNewJSONTree() -{ - std::unique_ptr tree = RooFit::Detail::JSONTree::create(); - JSONNode &n = tree->rootnode(); - n.set_map(); - auto &metadata = n["metadata"].set_map(); - - // add the mandatory hs3 version number - metadata["hs3_version"] << "0.1.90"; - - // Add information about the ROOT version that was used to generate this file - auto &rootInfo = appendNamedChild(metadata["packages"], "ROOT"); - std::string versionName = gROOT->GetVersion(); - // We want to consistently use dots such that the version name can be easily - // digested automatically. 
- std::replace(versionName.begin(), versionName.end(), '/', '.'); - rootInfo["version"] << versionName; - - return tree; -} - -} // namespace - -void RooStats::HistFactory::JSONTool::PrintJSON(std::ostream &os) -{ - std::unique_ptr tree = createNewJSONTree(); - auto &rootnode = tree->rootnode(); - Domains domains; - exportMeasurement(_measurement, rootnode, domains); - domains.writeJSON(rootnode["domains"]); - rootnode.writeJSON(os); -} -void RooStats::HistFactory::JSONTool::PrintJSON(std::string const &filename) -{ - std::ofstream out(filename); - this->PrintJSON(out); -} - -void RooStats::HistFactory::JSONTool::PrintYAML(std::ostream &os) -{ - std::unique_ptr tree = createNewJSONTree(); - auto &rootnode = tree->rootnode().set_map(); - Domains domains; - exportMeasurement(_measurement, rootnode, domains); - domains.writeJSON(rootnode["domains"]); - rootnode.writeYML(os); -} - -void RooStats::HistFactory::JSONTool::PrintYAML(std::string const &filename) -{ - std::ofstream out(filename); - this->PrintYAML(out); -} - -void RooStats::HistFactory::JSONTool::activateStatError(JSONNode &sampleNode) -{ - auto &node = sampleNode["modifiers"].set_seq().append_child().set_map(); - node["name"] << "mcstat"; - node["type"] << "staterror"; -} - -/// \endcond diff --git a/roofit/histfactory/src/JSONTool.h b/roofit/histfactory/src/JSONTool.h deleted file mode 100644 index b52fa029a3f35..0000000000000 --- a/roofit/histfactory/src/JSONTool.h +++ /dev/null @@ -1,52 +0,0 @@ -/// \cond ROOFIT_INTERNAL - -/* - * Project: RooFit - * Authors: - * Carsten D. Burgard, DESY/ATLAS, Dec 2021 - * - * Copyright (c) 2023, CERN - * - * Redistribution and use in source and binary forms, - * with or without modification, are permitted according to the terms - * listed in LICENSE (http://roofit.sourceforge.net/license.txt) - */ - -#ifndef RooStats_HistFactory_JSONTool_h -#define RooStats_HistFactory_JSONTool_h - -#include -#include - -namespace RooFit { -namespace Detail { -class JSONNode; -} // namespace Detail -} // namespace RooFit - -namespace RooStats { -namespace HistFactory { - -class Measurement; - -class JSONTool { -public: - JSONTool(RooStats::HistFactory::Measurement &m) : _measurement(m) {} - - void PrintJSON(std::ostream &os = std::cout); - void PrintJSON(std::string const &filename); - void PrintYAML(std::ostream &os = std::cout); - void PrintYAML(std::string const &filename); - - static void activateStatError(RooFit::Detail::JSONNode &sampleNode); - -private: - RooStats::HistFactory::Measurement &_measurement; -}; - -} // namespace HistFactory -} // namespace RooStats - -#endif - -/// \endcond diff --git a/roofit/histfactory/src/ParamHistFunc.cxx b/roofit/histfactory/src/ParamHistFunc.cxx index 94321ca0f28f9..ff5dcdca3dc0e 100644 --- a/roofit/histfactory/src/ParamHistFunc.cxx +++ b/roofit/histfactory/src/ParamHistFunc.cxx @@ -569,7 +569,7 @@ void ParamHistFunc::translate(RooFit::Detail::CodeSquashContext &ctx) const _numBinsPerDim = getNumBinsPerDim(_dataVars); } - std::string const &idx = _dataSet.calculateTreeIndexForCodeSquash(this, ctx, _dataVars, true); + std::string const &idx = _dataSet.calculateTreeIndexForCodeSquash(ctx, _dataVars, true); std::string const ¶mNames = ctx.buildArg(_paramSet); ctx.addResult(this, paramNames + "[" + idx + "]"); diff --git a/roofit/histfactory/src/PiecewiseInterpolation.cxx b/roofit/histfactory/src/PiecewiseInterpolation.cxx index 3810a8b4132a6..293fc8cb873b1 100644 --- a/roofit/histfactory/src/PiecewiseInterpolation.cxx +++ 
b/roofit/histfactory/src/PiecewiseInterpolation.cxx @@ -1,22 +1,40 @@ /** \class PiecewiseInterpolation - * \ingroup HistFactory - * The PiecewiseInterpolation is a class that can morph distributions into each other, which - * is useful to estimate systematic uncertainties. Given a nominal distribution and one or - * more altered or distorted ones, it computes a new shape depending on the value of the nuisance - * parameters \f$ \alpha_i \f$: - * \f[ - * A = \sum_i \mathrm{Interpolate}(\mathrm{low}_i, \mathrm{nominal}, \mathrm{high}_i, \alpha_i). - * \f] - * If an \f$ \alpha_i \f$ is zero, the distribution is identical to the nominal distribution, at - * \f$ \pm 1 \f$ it is identical to the up/down distribution for that specific \f$ i \f$. - * - * The class supports several interpolation methods, which can be selected for each parameter separately - * using setInterpCode(). The default interpolation code is 4. This performs - * - \f$ |\alpha | > 1 \f$: Linear extrapolation. - * - \f$ |\alpha | < 1 \f$: Polynomial interpolation. A sixth-order polynomial is used. Its coefficients - * are chosen such that function, first, and second derivative at \f$ \alpha \pm 1 \f$ match the values - * that the extrapolation procedure uses. - */ +* \ingroup HistFactory +* The PiecewiseInterpolation is a class that can morph distributions into each other, which +* is useful to estimate systematic uncertainties. Given a nominal distribution and one or +* more altered or distorted ones, it computes a new shape depending on the value of the nuisance +* parameters \f$ \theta_i \f$: +* \f[ +* A = \mathrm{nominal} + \sum_i I_i(\theta_i;\mathrm{low}_i, \mathrm{nominal}, \mathrm{high}_i). +* \f] +* for additive interpolation modes (interpCodes 0, 2, 3, and 4), or: +* \f[ +* A = \mathrm{nominal}\prod_i I_i(\theta_i;\mathrm{low}_i/\mathrm{nominal}, 1, \mathrm{high}_i/\mathrm{nominal}). +* \f] +* for multiplicative interpolation modes (interpCodes 1, 5, and 6). The interpCodes determine the function \f$ I_i \f$ (see table below). +* +* Note that a PiecewiseInterpolation with \f$ \mathrm{nominal}=1 \f$, N variations, and a multiplicative interpolation mode is equivalent to N +* PiecewiseInterpolations each with a single variation and the same interpolation code, all inside a RooProduct. +* +* If an \f$ \theta_i \f$ is zero, the distribution is identical to the nominal distribution, at +* \f$ \pm 1 \f$ it is identical to the up/down distribution for that specific \f$ i \f$. +* +* PiecewiseInterpolation will behave identically (except for differences in the interpCode assignments) to a FlexibleInterpVar if both its nominal, and high and low variation sets +* are all RooRealVar. +* +* The class supports several interpolation methods, which can be selected for each parameter separately +* using setInterpCode(). The default interpolation code is 0. The table below provides details of the interpCodes: + +| **interpCode** | **Name** | **Description** | +|----------------|----------|-----------------| +| 0 (default) | Additive Piecewise Linear | \f$ I_0(\theta;x_{-},x_0,x_{+}) = \theta(x_{+} - x_0) \f$ for \f$ \theta>=0 \f$, otherwise \f$ \theta(x_0 - x_{-}) \f$. Not recommended except if using a symmetric variation, because of discontinuities in derivatives. | +| 1 | Multiplicative Piecewise Exponential | \f$ I_1(\theta;x_{-},x_0,x_{+}) = (x_{+}/x_0)^{\theta} \f$ for \f$ \theta>=0 \f$, otherwise \f$ (x_{-}/x_0)^{-\theta} \f$. | +| 2 | Additive Quadratic Interp. + Linear Extrap. | Deprecated by interpCode 4. 
| +| 4 | Additive Poly Interp. + Linear Extrap. | \f$ I_4(\theta;x_{-},x_0,x_{+}) = I_0(\theta;x_{-},x_0,x_{+}) \f$ if \f$ |\theta|>=1 \f$, otherwise \f$ \theta(\frac{x_{+}-x_{-}}{2}+\theta\frac{x_{+}+x_{-}-2x_{0}}{16}(15+\theta^2(3\alpha^2-10))) \f$ (6th-order polynomial through origin for with matching 0th,1st,2nd derivatives at boundary). | +| 5 | Multiplicative Poly Interp. + Exponential Extrap. | \f$ I_5(\theta;x_{-},x_0,x_{+}) = I_1(\theta;x_{-},x_0,x_{+}) \f$ if \f$ |\theta|>=1 \f$, otherwise 6th-order polynomial for \f$ |\theta_i|<1 \f$ with matching 0th,1st,2nd derivatives at boundary. Recommended for normalization factors. In FlexibleInterpVar this is interpCode=4. | +| 6 | Multiplicative Poly Interp. + Linear Extrap. | \f$ I_6(\theta;x_{-},x_0,x_{+}) = 1+I_4(\theta;x_{-},x_0,x_{+}). \f$ Recommended for normalization factors that must not have roots (i.e. be equal to 0) outside of \f$ |\theta_i|<1 \f$. | + +*/ #include "RooStats/HistFactory/PiecewiseInterpolation.h" @@ -161,14 +179,8 @@ double PiecewiseInterpolation::evaluate() const auto param = static_cast(_paramSet.at(i)); auto low = static_cast(_lowSet.at(i)); auto high = static_cast(_highSet.at(i)); - Int_t icode = _interpCode[i] ; - - if(icode < 0 || icode > 5) { - coutE(InputArguments) << "PiecewiseInterpolation::evaluate ERROR: " << param->GetName() - << " with unknown interpolation code" << icode << endl ; - } using RooFit::Detail::MathFuncs::flexibleInterpSingle; - sum += flexibleInterpSingle(icode, low->getVal(), high->getVal(), 1.0, nominal, param->getVal(), sum); + sum += flexibleInterpSingle(_interpCode[i], low->getVal(), high->getVal(), 1.0, nominal, param->getVal(), sum); } if(_positiveDefinite && (sum<0)){ @@ -190,10 +202,6 @@ void PiecewiseInterpolation::translate(RooFit::Detail::CodeSquashContext &ctx) c std::string resName = "total_" + ctx.getTmpVarName(); for (std::size_t i = 0; i < n; ++i) { - if (_interpCode[i] < 0 || _interpCode[i] > 5) { - coutE(InputArguments) << "PiecewiseInterpolation::evaluate ERROR: " << _paramSet[i].GetName() - << " with unknown interpolation code" << _interpCode[i] << endl; - } if (_interpCode[i] != _interpCode[0]) { coutE(InputArguments) << "FlexibleInterpVar::evaluate ERROR: Code Squashing AD does not yet support having " "different interpolation codes for the same class object " @@ -234,7 +242,7 @@ void PiecewiseInterpolation::translate(RooFit::Detail::CodeSquashContext &ctx) c std::string lowName = ctx.getTmpVarName(); std::string highName = ctx.getTmpVarName(); std::string nominalName = ctx.getTmpVarName(); - code += "unsigned int " + idxName + " = " + nomHist.calculateTreeIndexForCodeSquash(this, ctx, dynamic_cast(*_nominal).variables()) + ";\n"; + code += "unsigned int " + idxName + " = " + nomHist.calculateTreeIndexForCodeSquash(ctx, dynamic_cast(*_nominal).variables()) + ";\n"; code += "double const* " + lowName + " = " + valsLowStr + " + " + nStr + " * " + idxName + ";\n"; code += "double const* " + highName + " = " + valsHighStr + " + " + nStr + " * " + idxName + ";\n"; code += "double " + nominalName + " = *(" + valsNominalStr + " + " + idxName + ");\n"; @@ -250,43 +258,47 @@ void PiecewiseInterpolation::translate(RooFit::Detail::CodeSquashContext &ctx) c ctx.addResult(this, resName); } +namespace { + +inline double broadcast(std::span const &s, std::size_t i) +{ + return s.size() > 1 ? 
s[i] : s[0]; +} + +} // namespace + //////////////////////////////////////////////////////////////////////////////// /// Interpolate between input distributions for all values of the observable in `evalData`. /// \param[in,out] evalData Struct holding spans pointing to input data. The results of this function will be stored here. /// \param[in] normSet Arguments to normalise over. -void PiecewiseInterpolation::doEval(RooFit::EvalContext & ctx) const +void PiecewiseInterpolation::doEval(RooFit::EvalContext &ctx) const { - std::span sum = ctx.output(); + std::span sum = ctx.output(); - auto nominal = ctx.at(_nominal); - for(unsigned int j=0; j < nominal.size(); ++j) { - sum[j] = nominal[j]; - } + auto nominal = ctx.at(_nominal); - for (unsigned int i=0; i < _paramSet.size(); ++i) { - const double param = static_cast(_paramSet.at(i))->getVal(); - auto low = ctx.at(_lowSet.at(i)); - auto high = ctx.at(_highSet.at(i)); - const int icode = _interpCode[i]; - - if (icode < 0 || icode > 5) { - coutE(InputArguments) << "PiecewiseInterpolation::doEval(): " << _paramSet[i].GetName() - << " with unknown interpolation code" << icode << std::endl; - throw std::invalid_argument("PiecewiseInterpolation::doEval() got invalid interpolation code " + std::to_string(icode)); - } + for (std::size_t j = 0; j < sum.size(); ++j) { + sum[j] = broadcast(nominal, j); + } - for (unsigned int j=0; j < nominal.size(); ++j) { - using RooFit::Detail::MathFuncs::flexibleInterpSingle; - sum[j] += flexibleInterpSingle(icode, low[j], high[j], 1.0, nominal[j], param, sum[j]); - } - } + for (unsigned int i = 0; i < _paramSet.size(); ++i) { + auto param = ctx.at(_paramSet.at(i)); + auto low = ctx.at(_lowSet.at(i)); + auto high = ctx.at(_highSet.at(i)); - if (_positiveDefinite) { - for(unsigned int j=0; j < nominal.size(); ++j) { - if (sum[j] < 0.) - sum[j] = 0.; - } - } + for (std::size_t j = 0; j < sum.size(); ++j) { + using RooFit::Detail::MathFuncs::flexibleInterpSingle; + sum[j] += flexibleInterpSingle(_interpCode[i], broadcast(low, j), broadcast(high, j), 1.0, broadcast(nominal, j), + broadcast(param, j), sum[j]); + } + } + + if (_positiveDefinite) { + for (std::size_t j = 0; j < sum.size(); ++j) { + if (sum[j] < 0.) 
+ sum[j] = 0.; + } + } } //////////////////////////////////////////////////////////////////////////////// @@ -335,7 +347,7 @@ Int_t PiecewiseInterpolation::getAnalyticalIntegralWN(RooArgSet& allVars, RooArg // KC: check if interCode=0 for all - for (auto it = _paramSet.begin(); it != _paramSet.end(); ++it) { + for (auto it = _paramSet.begin(); it != _paramSet.end(); ++it) { if (!_interpCode.empty() && _interpCode[it - _paramSet.begin()] != 0) { // can't factorize integral cout << "can't factorize integral" << endl; @@ -359,7 +371,7 @@ Int_t PiecewiseInterpolation::getAnalyticalIntegralWN(RooArgSet& allVars, RooArg // Make list of function projection and normalization integrals RooAbsReal *func ; - // do variations + // do variations for (auto it = _paramSet.begin(); it != _paramSet.end(); ++it) { auto i = it - _paramSet.begin(); @@ -479,12 +491,12 @@ double PiecewiseInterpolation::analyticalIntegralWN(Int_t code, const RooArgSet* // now get low/high variations // KC: old interp code with new iterator - + i = 0; for (auto const *param : static_range_cast(_paramSet)) { low = static_cast(cache->_lowIntList.at(i)); high = static_cast(cache->_highIntList.at(i)); - + if(param->getVal() > 0) { value += param->getVal()*(high->getVal() - nominal); } else { @@ -561,32 +573,44 @@ double PiecewiseInterpolation::analyticalIntegralWN(Int_t code, const RooArgSet* return value; } - -//////////////////////////////////////////////////////////////////////////////// - -void PiecewiseInterpolation::setInterpCode(RooAbsReal& param, int code, bool silent){ - int index = _paramSet.index(¶m); - if(index<0){ - coutE(InputArguments) << "PiecewiseInterpolation::setInterpCode ERROR: " << param.GetName() - << " is not in list" << endl ; - } else { - if(!silent){ - coutW(InputArguments) << "PiecewiseInterpolation::setInterpCode : " << param.GetName() - << " is now " << code << endl ; - } - _interpCode.at(index) = code; - } +void PiecewiseInterpolation::setInterpCode(RooAbsReal ¶m, int code, bool /*silent*/) +{ + int index = _paramSet.index(¶m); + if (index < 0) { + coutE(InputArguments) << "PiecewiseInterpolation::setInterpCode ERROR: " << param.GetName() << " is not in list" + << std::endl; + return; + } + setInterpCodeForParam(index, code); } - -//////////////////////////////////////////////////////////////////////////////// - -void PiecewiseInterpolation::setAllInterpCodes(int code){ - for(unsigned int i=0; i<_interpCode.size(); ++i){ - _interpCode.at(i) = code; - } +void PiecewiseInterpolation::setAllInterpCodes(int code) +{ + for (std::size_t i = 0; i < _interpCode.size(); ++i) { + setInterpCodeForParam(i, code); + } } +void PiecewiseInterpolation::setInterpCodeForParam(int iParam, int code) +{ + RooAbsArg const ¶m = _paramSet[iParam]; + if (code < 0 || code > 6) { + coutE(InputArguments) << "PiecewiseInterpolation::setInterpCode ERROR: " << param.GetName() + << " with unknown interpolation code " << code << ", keeping current code " + << _interpCode[iParam] << std::endl; + return; + } + if (code == 3) { + // In the past, code 3 was equivalent to code 2, which confused users. + // Now, we just say that code 3 doesn't exist and default to code 2 in + // that case for backwards compatible behavior. 
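The interpolation-code table and the refactored setters are easiest to check against a concrete object. The sketch below builds a one-parameter PiecewiseInterpolation (same constructor the new testPiecewiseInterpolation.cxx uses further down) and probes the ±1 boundaries, where every code must reproduce the high and low inputs:

```cpp
// Sketch: probing a one-parameter PiecewiseInterpolation at the +-1 boundaries.
#include "RooArgList.h"
#include "RooRealVar.h"
#include "RooStats/HistFactory/PiecewiseInterpolation.h"

#include <cstdio>

void probeInterpolation()
{
   RooRealVar nominal("nominal", "nominal", 3.0);
   RooRealVar theta("theta", "theta", 0.0, -2.0, 2.0);
   RooRealVar low("low", "low", 2.25), high("high", "high", 4.5);

   PiecewiseInterpolation pci("pci", "", nominal, RooArgList(low), RooArgList(high), RooArgList(theta));

   for (int code : {0, 1, 2, 4, 5, 6}) {
      pci.setAllInterpCodes(code);
      theta.setVal(1.0);
      const double atPlusOne = pci.getVal(); // equals high for every code
      theta.setVal(-1.0);
      const double atMinusOne = pci.getVal(); // equals low for every code
      std::printf("code %d: +1 -> %g, -1 -> %g\n", code, atPlusOne, atMinusOne);
   }
}
```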
+ coutE(InputArguments) << "PiecewiseInterpolation::setInterpCode ERROR: " << param.GetName() + << " with unknown interpolation code " << code << ", defaulting to code 2" << std::endl; + code = 2; + } + _interpCode.at(iParam) = code; + setValueDirty(); +} //////////////////////////////////////////////////////////////////////////////// diff --git a/roofit/histfactory/src/RooBarlowBeestonLL.cxx b/roofit/histfactory/src/RooBarlowBeestonLL.cxx index f9ea112eca18b..cb79bf1522d6c 100644 --- a/roofit/histfactory/src/RooBarlowBeestonLL.cxx +++ b/roofit/histfactory/src/RooBarlowBeestonLL.cxx @@ -636,7 +636,7 @@ void RooStats::HistFactory::RooBarlowBeestonLL::validateAbsMin() const _paramAbsMin.removeAll() ; // Only store non-constant parameters here! - std::unique_ptr tmp{(RooArgSet*) _par.selectByAttrib("Constant",false)}; + std::unique_ptr tmp{_par.selectByAttrib("Constant",false)}; _paramAbsMin.addClone(*tmp) ; _obsAbsMin.addClone(_obs) ; diff --git a/roofit/histfactory/src/hist2workspace-argparse.py b/roofit/histfactory/src/hist2workspace-argparse.py deleted file mode 100644 index c00b0dc955993..0000000000000 --- a/roofit/histfactory/src/hist2workspace-argparse.py +++ /dev/null @@ -1,14 +0,0 @@ -import argparse - - -def get_argparse(): - DESCRIPTION = "hist2workspace is a utility to create RooFit/RooStats workspace from histograms" - parser = argparse.ArgumentParser(prog="hist2workspace", description=DESCRIPTION) - parser.add_argument("-v", help="switch HistFactory message stream to INFO level", action="store_true") - parser.add_argument("-vv", help="switch HistFactory message stream to DEBUG level", action="store_true") - parser.add_argument( - "-disable_binned_fit_optimization", - help="disable the binned fit optimization used in HistFactory since ROOT 6.28", - action="store_true", - ) - return parser diff --git a/roofit/histfactory/src/hist2workspace.cxx b/roofit/histfactory/src/hist2workspace.cxx index ef81986727a42..a4bae1cab86f6 100644 --- a/roofit/histfactory/src/hist2workspace.cxx +++ b/roofit/histfactory/src/hist2workspace.cxx @@ -8,10 +8,6 @@ * For the list of contributors see $ROOTSYS/README/CREDITS. * *************************************************************************/ -//////////////////////////////////////////////////////////////////////////////// - - - #include #include #include @@ -21,7 +17,50 @@ #include "RooStats/HistFactory/ConfigParser.h" #include "RooStats/HistFactory/MakeModelAndMeasurementsFast.h" #include "HFMsgService.h" -#include "hist2workspaceCommandLineOptionsHelp.h" + +// When updating the docs for the hist2workspace executable, please recreate +// the Python file with the get_argparse() function (code below), and run the +// following argparse2help.py invocations: +// +// 1. To generate the string in the kCommandLineOptionsHelp variable +// +// python cmake/scripts/argparse2help.py hist2workspace-argparse.py hist2workspaceCommandLineOptionsHelp.h +// +// 2. 
Recreate the man file that should be put in roofit/man/man1/: +// +// python cmake/scripts/argparse2help.py hist2workspace-argparse.py hist2workspace.1 +// +// The content of hist2workspace-argparse.py is: +// +// ```Python +// import argparse +// +// +// def get_argparse(): +// DESCRIPTION = "hist2workspace is a utility to create RooFit/RooStats workspace from histograms" +// parser = argparse.ArgumentParser(prog="hist2workspace", description=DESCRIPTION) +// parser.add_argument("-v", help="switch HistFactory message stream to INFO level", action="store_true") +// parser.add_argument("-vv", help="switch HistFactory message stream to DEBUG level", action="store_true") +// parser.add_argument( +// "-disable_binned_fit_optimization", +// help="disable the binned fit optimization used in HistFactory since ROOT 6.28", +// action="store_true", +// ) +// return parser +// ``` + +constexpr static const char kCommandLineOptionsHelp[] = R"RAW( +usage: hist2workspace [-h] [-v] [-vv] [-disable_binned_fit_optimization] + +hist2workspace is a utility to create RooFit/RooStats workspace from histograms + +OPTIONS: + -h, --help show this help message and exit + -v switch HistFactory message stream to INFO level + -vv switch HistFactory message stream to DEBUG level + -disable_binned_fit_optimization disable the binned fit optimization used in HistFactory since ROOT 6.28 +)RAW"; + namespace RooStats { namespace HistFactory { diff --git a/roofit/histfactory/test/CMakeLists.txt b/roofit/histfactory/test/CMakeLists.txt index f435a5dff1ba1..947f65518f8d6 100644 --- a/roofit/histfactory/test/CMakeLists.txt +++ b/roofit/histfactory/test/CMakeLists.txt @@ -13,7 +13,7 @@ if(NOT MSVC OR win_broken_tests) endif() ROOT_ADD_GTEST(testHistFactory testHistFactory.cxx - LIBRARIES RooFitCore RooFit RooStats HistFactory RooFitHS3 + LIBRARIES RooFitCore RooFit RooStats HistFactory RooFitHS3 RooFitJSONInterface COPY_TO_BUILDDIR ${CMAKE_CURRENT_SOURCE_DIR}/ref_6.16_example_UsingC_channel1_meas_model.root ${CMAKE_CURRENT_SOURCE_DIR}/ref_6.16_example_UsingC_combined_meas_model.root) if(clad) @@ -21,4 +21,5 @@ if(clad) endif(clad) ROOT_ADD_GTEST(testParamHistFunc testParamHistFunc.cxx LIBRARIES RooFitCore HistFactory) +ROOT_ADD_GTEST(testPiecewiseInterpolation testPiecewiseInterpolation.cxx LIBRARIES RooFitCore HistFactory) ROOT_ADD_GTEST(testHistFactoryPlotting testHistFactoryPlotting.cxx LIBRARIES RooFitCore HistFactory) diff --git a/roofit/histfactory/test/testHistFactory.cxx b/roofit/histfactory/test/testHistFactory.cxx index 035d3c2ae355e..ae88dcc673600 100644 --- a/roofit/histfactory/test/testHistFactory.cxx +++ b/roofit/histfactory/test/testHistFactory.cxx @@ -27,8 +27,6 @@ #include #include -#include "../src/JSONTool.h" - #include "../../roofitcore/test/gtest_wrapper.h" #include @@ -511,59 +509,6 @@ void setInitialFitParameters(RooWorkspace &ws, MakeModelMode makeModelMode) } } -TEST_P(HFFixture, HistFactoryJSONTool) -{ - const MakeModelMode makeModelMode = std::get<0>(GetParam()); - - RooHelpers::LocalChangeMsgLevel changeMsgLvl(RooFit::WARNING); - - if (writeJsonFiles) { - RooStats::HistFactory::JSONTool{*_measurement}.PrintJSON(_name + "_1.json"); - } - std::stringstream ss; - RooStats::HistFactory::JSONTool{*_measurement}.PrintJSON(ss); - - RooWorkspace wsFromJson{"ws1"}; - RooJSONFactoryWSTool{wsFromJson}.importJSONfromString(ss.str()); - - auto *mc = dynamic_cast(ws->obj("ModelConfig")); - EXPECT_TRUE(mc != nullptr); - - auto *mcFromJson = dynamic_cast(wsFromJson.obj("ModelConfig")); - EXPECT_TRUE(mcFromJson != 
nullptr); - - RooAbsPdf *pdf = mc->GetPdf(); - EXPECT_TRUE(pdf != nullptr); - - RooAbsPdf *pdfFromJson = mcFromJson->GetPdf(); - EXPECT_TRUE(pdfFromJson != nullptr); - - RooAbsData *data = ws->data("obsData"); - EXPECT_TRUE(data != nullptr); - - RooAbsData *dataFromJson = wsFromJson.data("obsData"); - EXPECT_TRUE(dataFromJson != nullptr); - - RooArgSet const &globs = *mc->GetGlobalObservables(); - RooArgSet const &globsFromJson = *mcFromJson->GetGlobalObservables(); - - setInitialFitParameters(*ws, makeModelMode); - setInitialFitParameters(wsFromJson, makeModelMode); - - using namespace RooFit; - using Res = std::unique_ptr; - - Res result{pdf->fitTo(*data, Strategy(1), Minos(*mc->GetParametersOfInterest()), GlobalObservables(globs), - PrintLevel(-1), Save())}; - - Res resultFromJson{pdfFromJson->fitTo(*dataFromJson, Strategy(1), Minos(*mcFromJson->GetParametersOfInterest()), - GlobalObservables(globsFromJson), PrintLevel(-1), Save())}; - - // Do also the reverse comparison to check that the set of constant parameters matches - EXPECT_TRUE(result->isIdentical(*resultFromJson)); - EXPECT_TRUE(resultFromJson->isIdentical(*result)); -} - TEST_P(HFFixture, HS3ClosureLoop) { const MakeModelMode makeModelMode = std::get<0>(GetParam()); @@ -578,7 +523,7 @@ TEST_P(HFFixture, HS3ClosureLoop) std::string const &js = RooJSONFactoryWSTool{*ws}.exportJSONtoString(); if (writeJsonFiles) { - RooJSONFactoryWSTool{*ws}.exportJSON(_name + "_2.json"); + RooJSONFactoryWSTool{*ws}.exportJSON(_name + "_1.json"); } RooWorkspace wsFromJson("new"); @@ -588,7 +533,7 @@ TEST_P(HFFixture, HS3ClosureLoop) std::string const &js3 = RooJSONFactoryWSTool{wsFromJson}.exportJSONtoString(); if (writeJsonFiles) { - RooJSONFactoryWSTool{wsFromJson}.exportJSON(_name + "_3.json"); + RooJSONFactoryWSTool{wsFromJson}.exportJSON(_name + "_2.json"); } // Chack that JSON > WS > JSON doesn't change the JSON @@ -764,18 +709,5 @@ INSTANTIATE_TEST_SUITE_P(HistFactory, HFFixtureFit, testing::Combine(testing::Values(MakeModelMode::OverallSyst, MakeModelMode::HistoSyst, MakeModelMode::StatSyst, MakeModelMode::ShapeSyst), testing::Values(false, true), // non-uniform bins or not - testing::Values(ROOFIT_EVAL_BACKENDS)), - getNameFromInfo); - -#if !defined(_MSC_VER) || defined(R__ENABLE_BROKEN_WIN_TESTS) // See https://github.com/vgvassilev/clad/issues/752 -#ifdef TEST_CODEGEN_AD -// TODO: merge with the previous HFFixtureFix test suite once the codegen AD -// supports all of HistFactory -INSTANTIATE_TEST_SUITE_P(HistFactoryCodeGen, HFFixtureFit, - testing::Combine(testing::Values(MakeModelMode::OverallSyst, MakeModelMode::HistoSyst, - MakeModelMode::StatSyst, MakeModelMode::ShapeSyst), - testing::Values(false), // no non-uniform bins - testing::Values(RooFit::EvalBackend::Codegen())), + testing::Values(ROOFIT_EVAL_BACKENDS_WITH_CODEGEN)), getNameFromInfo); -#endif // TEST_CODEGEN_AD -#endif // R__WIN32 diff --git a/roofit/histfactory/test/testPiecewiseInterpolation.cxx b/roofit/histfactory/test/testPiecewiseInterpolation.cxx new file mode 100644 index 0000000000000..a51bad25fc645 --- /dev/null +++ b/roofit/histfactory/test/testPiecewiseInterpolation.cxx @@ -0,0 +1,103 @@ +// Tests for the PiecewiseInterpolation +// Authors: Jonas Rembser, CERN 12/2024 + +#include + +#include + +#include + +/// Validate that the interpolation codes are "additive" or "multiplicative" as documented. 
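For reference, these are the two combination rules that the AdditiveOrMultiplicative test below checks numerically, with n the nominal value and v_1, v_2 the values of the two single-parameter objects:

```latex
% Additive codes (0, 2, 4) combine as offsets from the nominal,
% multiplicative codes (1, 5, 6) combine as ratios to the nominal:
\[
  v_\text{add}  = (v_1 - n) + (v_2 - n) + n,
  \qquad
  v_\text{mult} = \frac{v_1}{n} \cdot \frac{v_2}{n} \cdot n .
\]
```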
+TEST(PiecewiseInterpolation, AdditiveOrMultiplicative) +{ + using namespace RooFit; + + // In the usual use cases, the nominal value is 1.0, but we spice up this + // test a little bit by changing that. + double nVal = 3.0; + RooRealVar nominal{"nominal", "nominal", nVal}; + + RooRealVar param1{"param_1", "param_1", -2., 2.}; + RooRealVar low1{"low_1", "low_1", nVal * 0.75}; + RooRealVar high1{"high_1", "high_1", nVal * 1.5}; + + RooRealVar param2{"param_2", "param_2", -1.5, 1.5}; + RooRealVar low2{"low_2", "low_2", nVal * 0.8}; + RooRealVar high2{"high_2", "high_2", nVal * 1.25}; + + int nBins = 10; + + param1.setBins(nBins); + param2.setBins(nBins); + + RooArgList paramsSet1{param1}; + RooArgList lowSet1{low1}; + RooArgList highSet1{high1}; + + RooArgList paramsSet2{param2}; + RooArgList lowSet2{low2}; + RooArgList highSet2{high2}; + + RooArgList paramsSetBoth{param1, param2}; + RooArgList lowSetBoth{low1, low2}; + RooArgList highSetBoth{high1, high2}; + + PiecewiseInterpolation pci1{"piecewise1", "", nominal, lowSet1, highSet1, paramsSet1}; + PiecewiseInterpolation pci2{"piecewise2", "", nominal, lowSet2, highSet2, paramsSet2}; + PiecewiseInterpolation pciBoth{"piecewiseBoth", "", nominal, lowSetBoth, highSetBoth, paramsSetBoth}; + + std::vector codes{0, 1, 2, 4, 5, 6}; + std::vector isMultiplicative{false, true, false, false, true, true}; + + for (std::size_t iCode = 0; iCode < codes.size(); ++iCode) { + + int code = codes[iCode]; + + pci1.setAllInterpCodes(code); + pci2.setAllInterpCodes(code); + pciBoth.setAllInterpCodes(code); + + // basic check that when param1 and param2 are equal to 1, pci1 and pci2 are equal to high + // and pciBoth is equal when the respective parameter is 1 + param2.setVal(0); + param1.setVal(1); + EXPECT_FLOAT_EQ(pci1.getVal(), high1.getVal()); + EXPECT_FLOAT_EQ(pciBoth.getVal(), high1.getVal()); + param1.setVal(0); + param2.setVal(1); + EXPECT_FLOAT_EQ(pci2.getVal(), high2.getVal()); + EXPECT_FLOAT_EQ(pciBoth.getVal(), high2.getVal()); + param2.setVal(0); + // and similarly for -1 + param1.setVal(-1); + EXPECT_FLOAT_EQ(pci1.getVal(), low1.getVal()); + EXPECT_FLOAT_EQ(pciBoth.getVal(), low1.getVal()); + param1.setVal(0); + param2.setVal(-1); + EXPECT_FLOAT_EQ(pci2.getVal(), low2.getVal()); + EXPECT_FLOAT_EQ(pciBoth.getVal(), low2.getVal()); + param2.setVal(0); + + for (int ibin1 = 0; ibin1 < param1.numBins(); ++ibin1) { + for (int ibin2 = 0; ibin2 < param2.numBins(); ++ibin2) { + param1.setBin(ibin1); + param2.setBin(ibin2); + + double nom = nominal.getVal(); + double v1 = pci1.getVal(); + double v2 = pci2.getVal(); + double vBoth = pciBoth.getVal(); + + // The definition of multiplicative and additive is in this test + double vBothMultRef = (v1 / nom) * (v2 / nom) * nom; + double vBothAddiRef = (v1 - nom) + (v2 - nom) + nom; + + if (isMultiplicative[iCode]) { + EXPECT_FLOAT_EQ(vBoth, vBothMultRef); + } else { + EXPECT_FLOAT_EQ(vBoth, vBothAddiRef); + } + } + } + } +} diff --git a/roofit/hs3/src/JSONFactories_RooFitCore.cxx b/roofit/hs3/src/JSONFactories_RooFitCore.cxx index efb859975b85e..3cfdbfc219256 100644 --- a/roofit/hs3/src/JSONFactories_RooFitCore.cxx +++ b/roofit/hs3/src/JSONFactories_RooFitCore.cxx @@ -545,25 +545,15 @@ class RooFormulaArgStreamer : public RooFit::JSONIO::Exporter { const RooArg_t *pdf = static_cast(func); elem["type"] << key(); TString expression(pdf->expression()); - std::vector> paramsWithIndex; - paramsWithIndex.reserve(pdf->nParameters()); - for (size_t i = 0; i < pdf->nParameters(); ++i) { - 
paramsWithIndex.emplace_back(pdf->getParameter(i), i); - } - std::sort(paramsWithIndex.begin(), paramsWithIndex.end()); // If the tokens follow the "x[#]" convention, the square braces enclosing each number // ensures that there is a unique mapping between the token and parameter name - for (auto [par, idx] : paramsWithIndex) { - expression.ReplaceAll(("x[" + std::to_string(idx) + "]").c_str(), par->GetName()); - } // If the tokens follow the "@#" convention, the numbers are not enclosed by braces. - // So there may be tokens with numbers whose lower place value forms a subset string of ones with a higher place value, - // e.g. "@1" is a subset of "@10". - // So the names of these parameters must be applied descending from the highest place value - // in order to ensure each parameter name is uniquely applied to its token. - for (auto it = paramsWithIndex.rbegin(); it != paramsWithIndex.rend(); ++it) { - RooAbsArg* par = it->first; - std::size_t idx = it->second; + // So there may be tokens with numbers whose lower place value forms a subset string of ones with a higher place + // value, e.g. "@1" is a subset of "@10". So the names of these parameters must be applied descending from the + // highest place value in order to ensure each parameter name is uniquely applied to its token. + for (size_t idx = pdf->nParameters(); idx--;) { + const RooAbsArg *par = pdf->getParameter(idx); + expression.ReplaceAll(("x[" + std::to_string(idx) + "]").c_str(), par->GetName()); expression.ReplaceAll(("@" + std::to_string(idx)).c_str(), par->GetName()); } elem["expression"] << expression.Data(); diff --git a/roofit/jsoninterface/CMakeLists.txt b/roofit/jsoninterface/CMakeLists.txt index cdfa3d71d4ed0..db632cf93d6a6 100644 --- a/roofit/jsoninterface/CMakeLists.txt +++ b/roofit/jsoninterface/CMakeLists.txt @@ -52,3 +52,5 @@ if(builtin_nlohmannjson) else() target_link_libraries(RooFitJSONInterface PRIVATE nlohmann_json::nlohmann_json) endif() + +ROOT_ADD_TEST_SUBDIRECTORY(test) diff --git a/roofit/jsoninterface/inc/RooFit/Detail/JSONInterface.h b/roofit/jsoninterface/inc/RooFit/Detail/JSONInterface.h index cf4c46bd49882..6cc67210d3c6e 100644 --- a/roofit/jsoninterface/inc/RooFit/Detail/JSONInterface.h +++ b/roofit/jsoninterface/inc/RooFit/Detail/JSONInterface.h @@ -13,7 +13,11 @@ #ifndef RooFit_Detail_JSONInterface_h #define RooFit_Detail_JSONInterface_h +#include + #include +#include +#include #include #include #include @@ -249,6 +253,39 @@ std::vector &operator<<(std::vector &v, RooFit::Detail::JSONNode const &n) return v; } +inline RooFit::Detail::JSONNode &operator<<(RooFit::Detail::JSONNode &n, std::span v) +{ + n.fill_seq(v); + return n; +} + +inline RooFit::Detail::JSONNode &operator<<(RooFit::Detail::JSONNode &n, std::span v) +{ + n.fill_seq(v); + return n; +} + +template +RooFit::Detail::JSONNode & +operator<<(RooFit::Detail::JSONNode &n, const std::unordered_map &m) +{ + n.set_map(); + for (const auto &it : m) { + n[it.first] << it.second; + } + return n; +} + +template +RooFit::Detail::JSONNode &operator<<(RooFit::Detail::JSONNode &n, const std::map &m) +{ + n.set_map(); + for (const auto &it : m) { + n[it.first] << it.second; + } + return n; +} + template <> inline int JSONNode::val_t() const { diff --git a/roofit/jsoninterface/test/CMakeLists.txt b/roofit/jsoninterface/test/CMakeLists.txt new file mode 100644 index 0000000000000..3c2834518d640 --- /dev/null +++ b/roofit/jsoninterface/test/CMakeLists.txt @@ -0,0 +1 @@ +ROOT_ADD_GTEST(testJSONInterface testJSONInterface.cxx LIBRARIES 
RooFitJSONInterface Matrix) diff --git a/roofit/jsoninterface/test/testJSONInterface.cxx b/roofit/jsoninterface/test/testJSONInterface.cxx new file mode 100644 index 0000000000000..5bec84a240559 --- /dev/null +++ b/roofit/jsoninterface/test/testJSONInterface.cxx @@ -0,0 +1,27 @@ +// Tests for the RooFit JSON interface +// Authors: Jonas Rembser, CERN 12/2024 + +#include + +#include + +#include + +TEST(JSONInterface, MapsOfTVectorD) +{ + using RooFit::Detail::JSONNode; + using RooFit::Detail::JSONTree; + + std::unique_ptr tree = JSONTree::create(); + JSONNode &rootnode = tree->rootnode(); + + rootnode.set_map(); + + rootnode["map"] << std::map{{"vec", TVectorD{3}}}; + rootnode["unordered_map"] << std::unordered_map{{"vec", TVectorD{3}}}; + + // For debugging: + // std::stringstream ss; + // rootnode.writeJSON(ss); + // std::cout << ss.str() << std::endl; +} diff --git a/roofit/man/man1/hist2workspace.1 b/roofit/man/man1/hist2workspace.1 new file mode 100644 index 0000000000000..2bcee0e5aa4cf --- /dev/null +++ b/roofit/man/man1/hist2workspace.1 @@ -0,0 +1,15 @@ +.TH hist2workspace 1 +.SH SYNOPSIS +usage: hist2workspace [-h] [-v] [-vv] [-disable_binned_fit_optimization] + +.SH DESCRIPTION +hist2workspace is a utility to create RooFit/RooStats workspace from histograms +.SH OPTIONS +.IP -h --help +show this help message and exit +.IP -v +switch HistFactory message stream to INFO level +.IP -vv +switch HistFactory message stream to DEBUG level +.IP -disable_binned_fit_optimization +disable the binned fit optimization used in HistFactory since ROOT 6.28 diff --git a/roofit/roofit/inc/RooChi2MCSModule.h b/roofit/roofit/inc/RooChi2MCSModule.h index 87c4a8f5e9af0..f1844158d0b25 100644 --- a/roofit/roofit/inc/RooChi2MCSModule.h +++ b/roofit/roofit/inc/RooChi2MCSModule.h @@ -19,6 +19,8 @@ #include "RooAbsMCStudyModule.h" +#include + class RooChi2MCSModule : public RooAbsMCStudyModule { public: @@ -32,16 +34,13 @@ class RooChi2MCSModule : public RooAbsMCStudyModule { bool processAfterFit(Int_t /*sampleNum*/) override ; private: + std::unique_ptr _data; // Summary dataset to store results + std::unique_ptr _chi2; // Chi^2 of function w.r.t. data + std::unique_ptr _ndof; // Number of degrees of freedom + std::unique_ptr _chi2red; // Reduced Chi^2 w.r.t data + std::unique_ptr _prob; // Probability of chi^2,nDOF combination - RooDataSet* _data = nullptr; // Summary dataset to store results - RooRealVar* _chi2 = nullptr; // Chi^2 of function w.r.t. 
data - RooRealVar* _ndof = nullptr; // Number of degrees of freedom - RooRealVar* _chi2red = nullptr; // Reduced Chi^2 w.r.t data - RooRealVar* _prob = nullptr; // Probability of chi^2,nDOF combination - - ClassDefOverride(RooChi2MCSModule,0) // MCStudy module to calculate chi2 between binned data and fit -} ; - + ClassDefOverride(RooChi2MCSModule, 0) // MCStudy module to calculate chi2 between binned data and fit +}; #endif - diff --git a/roofit/roofit/inc/RooCrystalBall.h b/roofit/roofit/inc/RooCrystalBall.h index 04b86397f477c..32f09fbc5f371 100644 --- a/roofit/roofit/inc/RooCrystalBall.h +++ b/roofit/roofit/inc/RooCrystalBall.h @@ -8,12 +8,9 @@ #include -class RooRealVar; - class RooCrystalBall final : public RooAbsPdf { public: - - RooCrystalBall(){}; + RooCrystalBall() {}; RooCrystalBall(const char *name, const char *title, RooAbsReal &x, RooAbsReal &x0, RooAbsReal &sigmaL, RooAbsReal &sigmaR, RooAbsReal &alphaL, RooAbsReal &nL, RooAbsReal &alphaR, RooAbsReal &nR); @@ -32,6 +29,22 @@ class RooCrystalBall final : public RooAbsPdf { Int_t getMaxVal(const RooArgSet &vars) const override; double maxVal(Int_t code) const override; + // Getters for non-optional parameters + RooAbsReal const &x() const { return *x_; } + RooAbsReal const &x0() const { return *x0_; } + RooAbsReal const &sigmaL() const { return *sigmaL_; } + RooAbsReal const &sigmaR() const { return *sigmaR_; } + RooAbsReal const &alphaL() const { return *alphaL_; } + RooAbsReal const &nL() const { return *nL_; } + + // Getters for optional parameter: return nullptr if parameter is not set + RooAbsReal const *alphaR() const { return alphaR_ ? &**alphaR_ : nullptr; } + RooAbsReal const *nR() const { return nR_ ? &**nR_ : nullptr; } + + // Convenience functions to check if optional parameters are set + bool hasAlphaR() const { return alphaR_ != nullptr; } + bool hasNR() const { return nR_ != nullptr; } + protected: double evaluate() const override; diff --git a/roofit/roofit/src/RooChi2MCSModule.cxx b/roofit/roofit/src/RooChi2MCSModule.cxx index cbec0a4628e4c..1f2993ae81aab 100644 --- a/roofit/roofit/src/RooChi2MCSModule.cxx +++ b/roofit/roofit/src/RooChi2MCSModule.cxx @@ -50,40 +50,24 @@ RooChi2MCSModule::RooChi2MCSModule(const RooChi2MCSModule &other) : RooAbsMCStud //////////////////////////////////////////////////////////////////////////////// /// Destructor -RooChi2MCSModule:: ~RooChi2MCSModule() -{ - if (_chi2) { - delete _chi2 ; - } - if (_ndof) { - delete _ndof ; - } - if (_chi2red) { - delete _chi2red ; - } - if (_prob) { - delete _prob ; - } - if (_data) { - delete _data ; - } -} +RooChi2MCSModule::~RooChi2MCSModule() = default; //////////////////////////////////////////////////////////////////////////////// /// Initialize module after attachment to RooMCStudy object bool RooChi2MCSModule::initializeInstance() { - // Construct variable that holds -log(L) fit with null hypothesis for given parameter - _chi2 = new RooRealVar("chi2","chi^2",0) ; - _ndof = new RooRealVar("ndof","number of degrees of freedom",0) ; - _chi2red = new RooRealVar("chi2red","reduced chi^2",0) ; - _prob = new RooRealVar("prob","prob(chi2,ndof)",0) ; + // Construct variable that holds -log(L) fit with null hypothesis for given parameter + _chi2 = std::make_unique("chi2", "chi^2", 0); + _ndof = std::make_unique("ndof", "number of degrees of freedom", 0); + _chi2red = std::make_unique("chi2red", "reduced chi^2", 0); + _prob = std::make_unique("prob", "prob(chi2,ndof)", 0); - // Create new dataset to be merged with RooMCStudy::fitParDataSet - _data = 
new RooDataSet("Chi2Data","Additional data for Chi2 study",RooArgSet(*_chi2,*_ndof,*_chi2red,*_prob)) ; + // Create new dataset to be merged with RooMCStudy::fitParDataSet + _data = std::make_unique("Chi2Data", "Additional data for Chi2 study", + RooArgSet(*_chi2, *_ndof, *_chi2red, *_prob)); - return true ; + return true; } //////////////////////////////////////////////////////////////////////////////// @@ -100,9 +84,9 @@ bool RooChi2MCSModule::initializeRun(Int_t /*numSamples*/) /// calculations of this module so that it is merged with /// RooMCStudy::fitParDataSet() by RooMCStudy -RooDataSet* RooChi2MCSModule::finalizeRun() +RooDataSet *RooChi2MCSModule::finalizeRun() { - return _data ; + return _data.get(); } //////////////////////////////////////////////////////////////////////////////// @@ -120,7 +104,7 @@ bool RooChi2MCSModule::processAfterFit(Int_t /*sampleNum*/) std::unique_ptr chi2Var{fitModel()->createChi2(*binnedData,RooFit::Extended(extendedGen()),RooFit::DataError(RooAbsData::SumW2))}; - std::unique_ptr floatPars{static_cast(fitParams()->selectByAttrib("Constant",false))}; + std::unique_ptr floatPars{fitParams()->selectByAttrib("Constant",false)}; _chi2->setVal(chi2Var->getVal()) ; _ndof->setVal(binnedData->numEntries()-floatPars->size()-1) ; diff --git a/roofit/roofit/src/RooParamHistFunc.cxx b/roofit/roofit/src/RooParamHistFunc.cxx index 0ca4527d5448e..b3a93fe939a88 100644 --- a/roofit/roofit/src/RooParamHistFunc.cxx +++ b/roofit/roofit/src/RooParamHistFunc.cxx @@ -79,7 +79,7 @@ double RooParamHistFunc::evaluate() const void RooParamHistFunc::translate(RooFit::Detail::CodeSquashContext &ctx) const { - std::string const &idx = _dh.calculateTreeIndexForCodeSquash(this, ctx, _x); + std::string const &idx = _dh.calculateTreeIndexForCodeSquash(ctx, _x); std::string arrName = ctx.buildArg(_p); std::string result = arrName + "[" + idx + "]"; if (_relParam) { diff --git a/roofit/roofitcore/inc/RooAbsBinning.h b/roofit/roofitcore/inc/RooAbsBinning.h index 52f43ffc11a4e..ce8ee4368e2ad 100644 --- a/roofit/roofitcore/inc/RooAbsBinning.h +++ b/roofit/roofitcore/inc/RooAbsBinning.h @@ -16,11 +16,19 @@ #ifndef ROO_ABS_BINNING #define ROO_ABS_BINNING -#include "Rtypes.h" -#include "RooPrintable.h" -#include "TNamed.h" +#include +#include + +#include + class RooAbsRealLValue ; +class RooAbsArg ; class RooAbsReal ; +namespace RooFit { +namespace Detail { +class CodeSquashContext; +} +} class RooAbsBinning : public TNamed, public RooPrintable { public: @@ -63,6 +71,8 @@ class RooAbsBinning : public TNamed, public RooPrintable { return out; } + virtual std::string translateBinNumber(RooFit::Detail::CodeSquashContext &ctx, RooAbsArg const &var, int coef) const; + virtual double binCenter(Int_t bin) const = 0 ; virtual double binWidth(Int_t bin) const = 0 ; virtual double binLow(Int_t bin) const = 0 ; diff --git a/roofit/roofitcore/inc/RooAbsCollection.h b/roofit/roofitcore/inc/RooAbsCollection.h index 05ed70a6dc59e..56ce60ba09e36 100644 --- a/roofit/roofitcore/inc/RooAbsCollection.h +++ b/roofit/roofitcore/inc/RooAbsCollection.h @@ -408,15 +408,6 @@ class RooAbsCollection : public TObject, public RooPrintable { private: -#if ROOT_VERSION_CODE < ROOT_VERSION(6, 34, 00) - // TODO: Remove this friend declaration and function in 6.34, where it's not - // needed anymore because the deprecated legacy iterators will be removed. - friend class RooWorkspace; - std::unique_ptr makeLegacyIterator (bool forward = true) const; -#else -#error "Please remove this unneeded code." 
-#endif - bool replaceImpl(const RooAbsArg& var1, const RooAbsArg& var2); using HashAssistedFind = RooFit::Detail::HashAssistedFind; diff --git a/roofit/roofitcore/inc/RooArgSet.h b/roofit/roofitcore/inc/RooArgSet.h index 536e83c21d0f2..3f3e56ca04036 100644 --- a/roofit/roofitcore/inc/RooArgSet.h +++ b/roofit/roofitcore/inc/RooArgSet.h @@ -140,6 +140,11 @@ class RooArgSet : public RooAbsCollection { using RooAbsCollection::selectCommon; using RooAbsCollection::snapshot; + /// Use RooAbsCollection::selectByAttrib(), but return as RooArgSet. + RooArgSet* selectByAttrib(const char* name, bool value) const { + return static_cast(RooAbsCollection::selectByAttrib(name, value)); + } + /// Use RooAbsCollection::selectByName(), but return as RooArgSet. inline RooArgSet* selectByName(const char* nameList, bool verbose=false) const { return static_cast(RooAbsCollection::selectByName(nameList, verbose)); diff --git a/roofit/roofitcore/inc/RooBinning.h b/roofit/roofitcore/inc/RooBinning.h index d8dac756e385e..f849d822aac64 100644 --- a/roofit/roofitcore/inc/RooBinning.h +++ b/roofit/roofitcore/inc/RooBinning.h @@ -69,6 +69,8 @@ class RooBinning : public RooAbsBinning { void addUniform(Int_t nBins, double xlo, double xhi); bool removeBoundary(double boundary); + std::string translateBinNumber(RooFit::Detail::CodeSquashContext &ctx, RooAbsArg const &var, int coef) const override; + protected: bool binEdges(Int_t bin, double& xlo, double& xhi) const; diff --git a/roofit/roofitcore/inc/RooCmdArg.h b/roofit/roofitcore/inc/RooCmdArg.h index f4133cd256172..0a37beb34facd 100644 --- a/roofit/roofitcore/inc/RooCmdArg.h +++ b/roofit/roofitcore/inc/RooCmdArg.h @@ -39,6 +39,7 @@ class RooCmdArg : public TNamed { const char* s1=nullptr, const char* s2=nullptr, const TObject* o1=nullptr, const TObject* o2=nullptr, const RooCmdArg* ca=nullptr, const char* s3=nullptr, const RooArgSet* c1=nullptr, const RooArgSet* c2=nullptr) ; + RooCmdArg(const RooCmdArg& other) ; RooCmdArg& operator=(const RooCmdArg& other) ; void addArg(const RooCmdArg& arg) ; @@ -107,6 +108,8 @@ class RooCmdArg : public TNamed { bool procSubArgs() const { return _procSubArgs; } bool prefixSubArgs() const { return _prefixSubArgs; } + std::string constructorCode() const; + private: static const RooCmdArg _none ; ///< Static instance of null object diff --git a/roofit/roofitcore/inc/RooDataHist.h b/roofit/roofitcore/inc/RooDataHist.h index 2da88fc9da5a9..22f5426fc4909 100644 --- a/roofit/roofitcore/inc/RooDataHist.h +++ b/roofit/roofitcore/inc/RooDataHist.h @@ -218,7 +218,7 @@ class RooDataHist : public RooAbsData, public RooDirItem { double const* wgtErrHiArray() const { return _errHi; } double const* sumW2Array() const { return _sumw2; } - std::string calculateTreeIndexForCodeSquash(RooAbsArg const *klass, RooFit::Detail::CodeSquashContext &ctx, + std::string calculateTreeIndexForCodeSquash(RooFit::Detail::CodeSquashContext &ctx, const RooAbsCollection &coords, bool reverse = false) const; std::string declWeightArrayForCodeSquash(RooFit::Detail::CodeSquashContext &ctx, bool correctForBinSize) const; diff --git a/roofit/roofitcore/inc/RooFit/Detail/MathFuncs.h b/roofit/roofitcore/inc/RooFit/Detail/MathFuncs.h index 7119cc4b9a55d..22865c1f33fd9 100644 --- a/roofit/roofitcore/inc/RooFit/Detail/MathFuncs.h +++ b/roofit/roofitcore/inc/RooFit/Detail/MathFuncs.h @@ -18,12 +18,11 @@ #include #include +#include #include namespace RooFit { - namespace Detail { - namespace MathFuncs { /// Calculates the binomial coefficient n over k. 
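The next hunk only adds braces around the loop body in `MathFuncs::polynomial`, which evaluates the coefficients with Horner's scheme before applying the `lowestOrder` shift. A small worked example of that evaluation (editor's sketch, values chosen purely for illustration):

```cpp
// coeffs = {1, 2, 3}, x = 2, lowestOrder = 0:
//   retVal = 3                 (highest coefficient)
//   retVal = 2 + 2 * 3 = 8
//   retVal = 1 + 2 * 8 = 17    ==  1 + 2*x + 3*x^2 at x = 2
double coeffs[] = {1.0, 2.0, 3.0};
double x = 2.0;
double retVal = coeffs[2];
for (int i = 1; i >= 0; --i) {
   retVal = coeffs[i] + x * retVal;
}
// retVal == 17.0
```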
@@ -130,8 +129,9 @@ template inline double polynomial(double const *coeffs, int nCoeffs, int lowestOrder, double x) { double retVal = coeffs[nCoeffs - 1]; - for (int i = nCoeffs - 2; i >= 0; i--) + for (int i = nCoeffs - 2; i >= 0; i--) { retVal = coeffs[i] + x * retVal; + } retVal = retVal * std::pow(x, lowestOrder); return retVal + (pdfMode && lowestOrder > 0 ? 1.0 : 0.0); } @@ -169,13 +169,32 @@ inline double constraintSum(double const *comp, unsigned int compSize) return sum; } -inline unsigned int getUniformBinning(double low, double high, double val, unsigned int numBins) +inline unsigned int uniformBinNumber(double low, double high, double val, unsigned int numBins, double coef) { double binWidth = (high - low) / numBins; - return val >= high ? numBins - 1 : std::abs((val - low) / binWidth); + return coef * (val >= high ? numBins - 1 : std::abs((val - low) / binWidth)); +} + +inline unsigned int rawBinNumber(double x, double const *boundaries, std::size_t nBoundaries) +{ + double const *end = boundaries + nBoundaries; + double const *it = std::lower_bound(boundaries, end, x); + // always return valid bin number + while (boundaries != it && (end == it || end == it + 1 || x < *it)) { + --it; + } + return it - boundaries; +} + +inline unsigned int +binNumber(double x, double coef, double const *boundaries, unsigned int nBoundaries, int nbins, int blo) +{ + const int rawBin = rawBinNumber(x, boundaries, nBoundaries); + int tmp = std::min(nbins, rawBin - blo); + return coef * std::max(0, tmp); } -inline double interpolate1d(double low, double high, double val, unsigned int numBins, double const* vals) +inline double interpolate1d(double low, double high, double val, unsigned int numBins, double const *vals) { double binWidth = (high - low) / numBins; int idx = val >= high ? numBins - 1 : std::abs((val - low) / binWidth); @@ -185,9 +204,9 @@ inline double interpolate1d(double low, double high, double val, unsigned int nu if (val > low + 0.5 * binWidth && val < high - 0.5 * binWidth) { double slope; if (val < central) { - slope = vals[idx] - vals[idx - 1]; + slope = vals[idx] - vals[idx - 1]; } else { - slope = vals[idx + 1] - vals[idx]; + slope = vals[idx + 1] - vals[idx]; } return vals[idx] + slope * (val - central) / binWidth; } @@ -239,34 +258,39 @@ inline double flexibleInterpSingle(unsigned int code, double low, double high, d } else { return a * std::pow(paramVal, 2) + b * paramVal + c; } - } else if (code == 3) { - // parabolic version of log-normal - double a = 0.5 * (high + low) - nominal; - double b = 0.5 * (high - low); - double c = 0; - if (paramVal > 1) { - return (2 * a + b) * (paramVal - 1) + high - nominal; - } else if (paramVal < -1) { - return -1 * (2 * a - b) * (paramVal + 1) + low - nominal; - } else { - return a * std::pow(paramVal, 2) + b * paramVal + c; - } - } else if (code == 4) { + // According to an old comment in the source code, code 3 was apparently + // meant to be a "parabolic version of log-normal", but it never got + // implemented. If someone would need it, it could be implemented as doing + // code 2 in log space. 
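      // Editor's sketch (hypothetical, not part of this change): "code 2 in log
      // space" would mean running the code-2 parabolic/linear interpolation on
      // log(high/nominal) and log(low/nominal) instead of on high - nominal and
      // low - nominal, and exponentiating the result, e.g. for |x| <= 1:
      //   logHi  = std::log(high / nominal);  logLo = std::log(low / nominal);
      //   a      = 0.5 * (logHi + logLo);     b     = 0.5 * (logHi - logLo);
      //   logMod = a * x * x + b * x;   // linear extrapolation beyond |x| > 1
      //   return nominal * (std::exp(logMod) - 1.0);   // additive modification
      // At x = +1 and x = -1 this reproduces high - nominal and low - nominal,
      // just like code 2 does.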
+ } else if (code == 4 || code == 6) { double x = paramVal; + double mod = 1.0; + if (code == 6) { + high /= nominal; + low /= nominal; + nominal = 1; + } if (x >= boundary) { - return x * (high - nominal); + mod = x * (high - nominal); } else if (x <= -boundary) { - return x * (nominal - low); + mod = x * (nominal - low); + } else { + // interpolate 6th degree + double t = x / boundary; + double eps_plus = high - nominal; + double eps_minus = nominal - low; + double S = 0.5 * (eps_plus + eps_minus); + double A = 0.0625 * (eps_plus - eps_minus); + + mod = x * (S + t * A * (15 + t * t * (-10 + t * t * 3))); } - // interpolate 6th degree - double t = x / boundary; - double eps_plus = high - nominal; - double eps_minus = nominal - low; - double S = 0.5 * (eps_plus + eps_minus); - double A = 0.0625 * (eps_plus - eps_minus); + // code 6 is multiplicative version of code 4 + if (code == 6) { + mod *= res; + } + return mod; - return x * (S + t * A * (15 + t * t * (-10 + t * t * 3))); } else if (code == 5) { double x = paramVal; double mod = 1.0; @@ -385,7 +409,7 @@ inline double cbShape(double m, double m0, double sigma, double alpha, double n) if (alpha < 0) t = -t; - double absAlpha = std::abs((double)alpha); + double absAlpha = std::abs(alpha); if (t >= -absAlpha) { return std::exp(-0.5 * t * t); @@ -728,10 +752,43 @@ inline double bernsteinIntegral(double xlo, double xhi, double xmin, double xmax return norm * (xmax - xmin); } -} // namespace MathFuncs +inline double multiVarGaussian(int n, const double *x, const double *mu, const double *covI) +{ + double result = 0.0; + + // Compute the bilinear form (x-mu)^T * covI * (x-mu) + for (int i = 0; i < n; ++i) { + for (int j = 0; j < n; ++j) { + result += (x[i] - mu[i]) * covI[i * n + j] * (x[j] - mu[j]); + } + } + return std::exp(-0.5 * result); +} +} // namespace MathFuncs } // namespace Detail +} // namespace RooFit + +namespace clad { +namespace custom_derivatives { +namespace RooFit { +namespace Detail { +namespace MathFuncs { + +// Clad can't generate the pullback for binNumber because of the +// std::lower_bound usage. But since binNumber returns an integer, and such +// functions have mathematically no derivatives anyway, we just declare a +// custom dummy pullback that does nothing. +template +void binNumber_pullback(Types...) 
+{ +} + +} // namespace MathFuncs +} // namespace Detail } // namespace RooFit +} // namespace custom_derivatives +} // namespace clad #endif diff --git a/roofit/roofitcore/inc/RooMinimizer.h b/roofit/roofitcore/inc/RooMinimizer.h index 89ea7d5294748..d8e89fb3be232 100644 --- a/roofit/roofitcore/inc/RooMinimizer.h +++ b/roofit/roofitcore/inc/RooMinimizer.h @@ -60,6 +60,7 @@ class RooMinimizer : public TObject { double Edm() const { return fEdm; } bool IsValid() const { return fValid; } int Status() const { return fStatus; } + void GetCovarianceMatrix(TMatrixDSym &cov) const; bool isParameterFixed(unsigned int ipar) const; diff --git a/roofit/roofitcore/inc/RooMultiVarGaussian.h b/roofit/roofitcore/inc/RooMultiVarGaussian.h index 30d26b09ae1f6..e30c185c9f6f9 100644 --- a/roofit/roofitcore/inc/RooMultiVarGaussian.h +++ b/roofit/roofitcore/inc/RooMultiVarGaussian.h @@ -84,6 +84,10 @@ class RooMultiVarGaussian : public RooAbsPdf { static void blockDecompose(const TMatrixD& input, const std::vector& map1, const std::vector& map2, TMatrixDSym& S11, TMatrixD& S12, TMatrixD& S21, TMatrixDSym& S22) ; + void translate(RooFit::Detail::CodeSquashContext &ctx) const override; + std::string + buildCallToAnalyticIntegral(Int_t code, const char *rangeName, RooFit::Detail::CodeSquashContext &ctx) const override; + protected: void decodeCode(Int_t code, std::vector& map1, std::vector& map2) const; diff --git a/roofit/roofitcore/inc/RooUniformBinning.h b/roofit/roofitcore/inc/RooUniformBinning.h index 13264e1d694d0..36a9ee91177fc 100644 --- a/roofit/roofitcore/inc/RooUniformBinning.h +++ b/roofit/roofitcore/inc/RooUniformBinning.h @@ -44,6 +44,8 @@ class RooUniformBinning : public RooAbsBinning { double averageBinWidth() const override { return _binw ; } double* array() const override ; + std::string translateBinNumber(RooFit::Detail::CodeSquashContext &ctx, RooAbsArg const &var, int coef) const override; + protected: mutable std::vector _array; ///(&*beg, std::distance(beg, end)); + return std::span(_vec.data(), std::distance(beg, end)); } std::size_t size() const { return _vec.size() ; } diff --git a/roofit/roofitcore/src/ConstraintHelpers.cxx b/roofit/roofitcore/src/ConstraintHelpers.cxx index dc1836210e42f..3f6afcbc8e9dc 100644 --- a/roofit/roofitcore/src/ConstraintHelpers.cxx +++ b/roofit/roofitcore/src/ConstraintHelpers.cxx @@ -52,7 +52,7 @@ getGlobalObservables(RooAbsPdf const &pdf, RooArgSet const *globalObservables, c if (globalObservablesTag) { std::unique_ptr allVars{pdf.getVariables()}; - return std::unique_ptr{static_cast(allVars->selectByAttrib(globalObservablesTag, true))}; + return std::unique_ptr{allVars->selectByAttrib(globalObservablesTag, true)}; } // no global observables specified diff --git a/roofit/roofitcore/src/FitHelpers.cxx b/roofit/roofitcore/src/FitHelpers.cxx index 97bfeefe975bf..b29fcc88a915e 100644 --- a/roofit/roofitcore/src/FitHelpers.cxx +++ b/roofit/roofitcore/src/FitHelpers.cxx @@ -105,7 +105,7 @@ int calcAsymptoticCorrectedCovariance(RooAbsReal &pdf, RooMinimizer &minimizer, const RooArgList &floated = rw->floatParsFinal(); RooArgSet allparams; logpdf.getParameters(data.get(), allparams); - std::unique_ptr floatingparams{static_cast(allparams.selectByAttrib("Constant", false))}; + std::unique_ptr floatingparams{allparams.selectByAttrib("Constant", false)}; const double eps = 1.0e-4; @@ -317,7 +317,7 @@ void resetFitrangeAttributes(RooAbsArg &pdf, RooAbsData const &data, std::string pdf.setStringAttribute("fitrange", fitrangeValue.substr(0, fitrangeValue.size() - 
1).c_str()); } -std::unique_ptr createSimultaneousNLL(RooSimultaneous const &simPdf, bool isExtended, +std::unique_ptr createSimultaneousNLL(RooSimultaneous const &simPdf, bool isSimPdfExtended, std::string const &rangeName, RooFit::OffsetMode offset) { RooAbsCategoryLValue const &simCat = simPdf.indexCat(); @@ -341,9 +341,14 @@ std::unique_ptr createSimultaneousNLL(RooSimultaneous const &simPdf, if (RooAbsPdf *pdf = simPdf.getPdf(catName.c_str())) { auto name = std::string("nll_") + pdf->GetName(); - std::unique_ptr observables( - static_cast(std::unique_ptr(pdf->getVariables())->selectByAttrib("__obs__", true))); - auto nll = std::make_unique(name.c_str(), name.c_str(), *pdf, *observables, isExtended, offset); + std::unique_ptr observables{ + std::unique_ptr(pdf->getVariables())->selectByAttrib("__obs__", true)}; + // In a simultaneous fit, it is allowed that only a subset of the pdfs + // are extended. Therefore, we have to make sure that we don't request + // extended NLL objects for channels that can't be extended. + const bool isPdfExtended = isSimPdfExtended && pdf->extendMode() != RooAbsPdf::CanNotBeExtended; + auto nll = + std::make_unique(name.c_str(), name.c_str(), *pdf, *observables, isPdfExtended, offset); // Rename the special variables nll->setPrefix(std::string("_") + catName + "_"); nllTerms.addOwned(std::move(nll)); diff --git a/roofit/roofitcore/src/RooAbsBinning.cxx b/roofit/roofitcore/src/RooAbsBinning.cxx index 190bc164d4757..f34487b33b64f 100644 --- a/roofit/roofitcore/src/RooAbsBinning.cxx +++ b/roofit/roofitcore/src/RooAbsBinning.cxx @@ -26,6 +26,8 @@ This class defines the interface to retrieve bin boundaries, ranges etc. #include "RooAbsBinning.h" #include "RooAbsReal.h" +#include "RooMsgService.h" + #include "TBuffer.h" #include "TClass.h" @@ -135,3 +137,9 @@ void RooAbsBinning::Streamer(TBuffer &R__b) R__b.SetByteCount(R__c, true); } } + +std::string RooAbsBinning::translateBinNumber(RooFit::Detail::CodeSquashContext &, RooAbsArg const &, int) const +{ + oocoutE(nullptr, InputArguments) << "This binning doesn't support codegen!" << std::endl; + return ""; +} diff --git a/roofit/roofitcore/src/RooAbsCollection.cxx b/roofit/roofitcore/src/RooAbsCollection.cxx index fd91282f3b87c..1d903b29fe8a7 100644 --- a/roofit/roofitcore/src/RooAbsCollection.cxx +++ b/roofit/roofitcore/src/RooAbsCollection.cxx @@ -1582,18 +1582,6 @@ void RooAbsCollection::sortTopologically() { } } -//////////////////////////////////////////////////////////////////////////////// -/// Factory for legacy iterators. - -std::unique_ptr RooAbsCollection::makeLegacyIterator (bool forward) const { - if (!forward) { - ccoutE(DataHandling) << "The legacy RooFit collection iterators don't support reverse iterations, any more. " - << "Use begin() and end()" << std::endl; - } - return std::make_unique(_list); -} - - //////////////////////////////////////////////////////////////////////////////// /// Insert an element into the owned collections. void RooAbsCollection::insert(RooAbsArg* item) { diff --git a/roofit/roofitcore/src/RooBinning.cxx b/roofit/roofitcore/src/RooBinning.cxx index 3f24b3cdeea0c..dd8dd38cff923 100644 --- a/roofit/roofitcore/src/RooBinning.cxx +++ b/roofit/roofitcore/src/RooBinning.cxx @@ -26,15 +26,18 @@ the user to add single bin boundaries, mirrored pairs, or sets of uniformly spaced boundaries. 
**/ -#include "Riostream.h" -#include "RooBinning.h" -#include "RooDouble.h" -#include "RooAbsPdf.h" -#include "RooRealVar.h" -#include "RooNumber.h" -#include "RooMsgService.h" -#include "TBuffer.h" -#include "TList.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include #include #include @@ -101,8 +104,7 @@ RooBinning::~RooBinning() bool RooBinning::addBoundary(double boundary) { - std::vector::iterator it = - std::lower_bound(_boundaries.begin(), _boundaries.end(), boundary); + auto it = std::lower_bound(_boundaries.begin(), _boundaries.end(), boundary); if (_boundaries.end() != it && *it == boundary) { // If boundary previously existed as range delimiter, // convert to regular boundary now @@ -130,8 +132,7 @@ void RooBinning::addBoundaryPair(double boundary, double mirrorPoint) bool RooBinning::removeBoundary(double boundary) { - std::vector::iterator it = std::lower_bound(_boundaries.begin(), - _boundaries.end(), boundary); + auto it = std::lower_bound(_boundaries.begin(), _boundaries.end(), boundary); if (_boundaries.end() != it && *it == boundary) { _boundaries.erase(it); // if some moron deletes the boundaries corresponding to the current @@ -161,28 +162,16 @@ void RooBinning::addUniform(Int_t nbins, double xlo, double xhi) } } -namespace { - -inline int rawBinNumberImpl(double x, std::vector const& boundaries) { - auto it = std::lower_bound(boundaries.begin(), boundaries.end(), x); - // always return valid bin number - while (boundaries.begin() != it && - (boundaries.end() == it || boundaries.end() == it + 1 || x < *it)) --it; - return it - boundaries.begin(); -} - -} - //////////////////////////////////////////////////////////////////////////////// /// Return sequential bin number that contains value x where bin /// zero is the first bin with an upper boundary above the lower bound /// of the range -void RooBinning::binNumbers(double const * x, int * bins, std::size_t n, int coef) const +void RooBinning::binNumbers(double const *x, int *bins, std::size_t n, int coef) const { - for(std::size_t i = 0; i < n; ++i) { - bins[i] += coef * (std::max(0, std::min(_nbins, rawBinNumberImpl(x[i], _boundaries) - _blo))); - } + for (std::size_t i = 0; i < n; ++i) { + bins[i] += RooFit::Detail::MathFuncs::binNumber(x[i], coef, _boundaries.data(), _boundaries.size(), _nbins, _blo); + } } //////////////////////////////////////////////////////////////////////////////// @@ -239,9 +228,8 @@ void RooBinning::updateBinCount() _nbins = -1; return; } - _blo = rawBinNumberImpl(_xlo, _boundaries); - std::vector::const_iterator it = std::lower_bound( - _boundaries.begin(), _boundaries.end(), _xhi); + _blo = RooFit::Detail::MathFuncs::rawBinNumber(_xlo, _boundaries.data(), _boundaries.size()); + auto it = std::lower_bound(_boundaries.begin(), _boundaries.end(), _xhi); if (_boundaries.begin() != it && (_boundaries.end() == it || _xhi < *it)) --it; const Int_t bhi = it - _boundaries.begin(); _nbins = bhi - _blo; @@ -357,3 +345,8 @@ void RooBinning::Streamer(TBuffer &R__b) R__b.WriteClassBuffer(RooBinning::Class(),this); } } + +std::string RooBinning::translateBinNumber(RooFit::Detail::CodeSquashContext &ctx, RooAbsArg const &var, int coef) const +{ + return ctx.buildCall("RooFit::Detail::MathFuncs::binNumber", var, coef, _boundaries, _boundaries.size(), _nbins, _blo); +} diff --git a/roofit/roofitcore/src/RooCmdArg.cxx b/roofit/roofitcore/src/RooCmdArg.cxx index 9cf58cc147af7..1e93006f35dc9 100644 --- a/roofit/roofitcore/src/RooCmdArg.cxx +++ 
b/roofit/roofitcore/src/RooCmdArg.cxx @@ -34,6 +34,10 @@ that create and fill these generic containers #include "Riostream.h" #include "RooArgSet.h" +#include "RooFitImplHelpers.h" + +#include +#include #include #include @@ -207,13 +211,86 @@ void RooCmdArg::setSet(Int_t idx,const RooArgSet& set) _c[idx].add(set) ; } +std::string RooCmdArg::constructorCode() const +{ + std::array needs; + needs[0] = true; // name + needs[1] = true; // i1 + needs[2] = _i[1] != 0; // i2 + needs[3] = _d[0] != 0; // d1 + needs[4] = _d[1] != 0; // d2 + needs[5] = !_s[0].empty(); // s1 + needs[6] = !_s[1].empty(); // s2 + needs[7] = _o[0]; // o1 + needs[8] = _o[1]; // o2 + needs[9] = !_argList.empty(); // ca + needs[10] = !_s[2].empty(); // s3 + needs[11] = _c; // c1 + needs[12] = _c && !_c[1].empty(); // c2 + + // figure out until which point we actually need to pass constructor + // arguments + bool b = false; + for (int i = needs.size() - 1; i >= 0; --i) { + b |= needs[i]; + needs[i] = b; + } + + std::stringstream ss; + + // The first two arguments always need to be passed + ss << "RooCmdArg(\"" << GetName() << "\", " << _i[0]; + + if (needs[2]) + ss << ", " << _i[1]; + if (needs[3]) + ss << ", " << _d[0]; + if (needs[4]) + ss << ", " << _d[1]; + if (needs[5]) + ss << ", " << (!_s[0].empty() ? "\"" + _s[0] + "\"" : "\"\""); + if (needs[6]) + ss << ", " << (!_s[1].empty() ? "\"" + _s[1] + "\"" : "\"\""); + if (needs[7]) + ss << ", " << (_o[0] ? "\"" + std::string(_o[0]->GetName()) + "\"" : "0"); + if (needs[8]) + ss << ", " << (_o[1] ? "\"" + std::string(_o[1]->GetName()) + "\"" : "0"); + if (needs[9]) { + ss << ", "; + if (!_argList.empty()) { + ss << "{\n"; + for (std::size_t i = 0; i < _argList.size(); ++i) { + if (auto *cmdArg = dynamic_cast(_argList.At(i))) { + ss << cmdArg->constructorCode() << "\n"; + } + } + ss << "}\n"; + } else { + ss << 0; + } + } + if (needs[10]) + ss << ", " << (!_s[2].empty() ? 
"\"" + _s[2] + "\"" : "\"\""); + if (needs[11]) + ss << ", RooArgSet(" << RooHelpers::getColonSeparatedNameString(_c[0], ',') << ")"; + if (needs[12]) + ss << ", RooArgSet(" << RooHelpers::getColonSeparatedNameString(_c[1], ',') << ")"; + ss << ")"; + + return ss.str(); +} //////////////////////////////////////////////////////////////////////////////// -/// Print contents -void RooCmdArg::Print(const char*) const { - std::cout << GetName() - << ":\ndoubles\t" << _d[0] << " " << _d[1] - << "\nints\t" << _i[0] << " " << _i[1] - << "\nstrings\t" << _s[0] << " " << _s[1] << " " << _s[2] - << "\nobjects\t" << _o[0] << " " << _o[1] << std::endl; +// Print contents +void RooCmdArg::Print(const char *opts) const +{ + TString o{opts}; + if (o.Contains("v")) { + std::cout << constructorCode() << std::endl; + return; + } + + std::cout << GetName() << ":\ndoubles\t" << _d[0] << " " << _d[1] << "\nints\t" << _i[0] << " " << _i[1] + << "\nstrings\t" << _s[0] << " " << _s[1] << " " << _s[2] << "\nobjects\t" << _o[0] << " " << _o[1] + << std::endl; } diff --git a/roofit/roofitcore/src/RooDataHist.cxx b/roofit/roofitcore/src/RooDataHist.cxx index cb31a6f73ca6a..f92642bc04f0e 100644 --- a/roofit/roofitcore/src/RooDataHist.cxx +++ b/roofit/roofitcore/src/RooDataHist.cxx @@ -996,19 +996,13 @@ std::string RooDataHist::declWeightArrayForCodeSquash(RooFit::Detail::CodeSquash bool correctForBinSize) const { std::vector vals(_arrSize); - if (correctForBinSize) { - for (std::size_t i = 0; i < vals.size(); ++i) { - vals[i] = _wgt[i] / _binv[i]; - } - } else { - for (std::size_t i = 0; i < vals.size(); ++i) { - vals[i] = _wgt[i]; - } + for (std::size_t i = 0; i < vals.size(); ++i) { + vals[i] = correctForBinSize ? _wgt[i] / _binv[i] : _wgt[i]; } return ctx.buildArg(vals); } -std::string RooDataHist::calculateTreeIndexForCodeSquash(RooAbsArg const * /*klass*/, RooFit::Detail::CodeSquashContext &ctx, +std::string RooDataHist::calculateTreeIndexForCodeSquash(RooFit::Detail::CodeSquashContext &ctx, const RooAbsCollection &coords, bool reverse) const { assert(coords.size() == _vars.size()); @@ -1027,16 +1021,9 @@ std::string RooDataHist::calculateTreeIndexForCodeSquash(RooAbsArg const * /*kla coutE(InputArguments) << "RooHistPdf::weight(" << GetName() << ") ERROR: Code Squashing currently does not support category values." << std::endl; return ""; - } else if (!dynamic_cast(binning)) { - coutE(InputArguments) << "RooHistPdf::weight(" << GetName() - << ") ERROR: Code Squashing currently only supports uniformly binned cases." - << std::endl; - return ""; } - std::string const &bin = ctx.buildCall("RooFit::Detail::MathFuncs::getUniformBinning", binning->lowBound(), - binning->highBound(), *theVar, binning->numBins()); - code += " + " + std::to_string(idxMult) + " * " + bin; + code += " + " + binning->translateBinNumber(ctx, *theVar, idxMult); // Use RooAbsLValue here because it also generalized to categories, which // is useful in the future. dynamic_cast because it's a cross-cast. 
diff --git a/roofit/roofitcore/src/RooHistPdf.cxx b/roofit/roofitcore/src/RooHistPdf.cxx index fc11d305a34d0..fcb2ccbf2c965 100644 --- a/roofit/roofitcore/src/RooHistPdf.cxx +++ b/roofit/roofitcore/src/RooHistPdf.cxx @@ -240,7 +240,7 @@ void RooHistPdf::rooHistTranslateImpl(RooAbsArg const *klass, RooFit::Detail::Co binning.highBound(), *obs[0], binning.numBins(), weightArr)); return; } - std::string const &offset = dataHist->calculateTreeIndexForCodeSquash(klass, ctx, obs); + std::string const &offset = dataHist->calculateTreeIndexForCodeSquash(ctx, obs); std::string weightArr = dataHist->declWeightArrayForCodeSquash(ctx, correctForBinSize); ctx.addResult(klass, "*(" + weightArr + " + " + offset + ")"); } diff --git a/roofit/roofitcore/src/RooMinimizer.cxx b/roofit/roofitcore/src/RooMinimizer.cxx index 77449395ec8b7..9d17735a34d82 100644 --- a/roofit/roofitcore/src/RooMinimizer.cxx +++ b/roofit/roofitcore/src/RooMinimizer.cxx @@ -1179,3 +1179,14 @@ bool RooMinimizer::FitResult::isParameterFixed(unsigned int ipar) const { return fFixedParams.find(ipar) != fFixedParams.end(); } + +void RooMinimizer::FitResult::GetCovarianceMatrix(TMatrixDSym &covs) const +{ + const size_t nParams = fParams.size(); + covs.ResizeTo(nParams, nParams); + for (std::size_t ic = 0; ic < nParams; ic++) { + for (std::size_t ii = 0; ii < nParams; ii++) { + covs(ic, ii) = covMatrix(fCovMatrix, ic, ii); + } + } +} diff --git a/roofit/roofitcore/src/RooMultiVarGaussian.cxx b/roofit/roofitcore/src/RooMultiVarGaussian.cxx index cc883f86accb6..8c66cada01437 100644 --- a/roofit/roofitcore/src/RooMultiVarGaussian.cxx +++ b/roofit/roofitcore/src/RooMultiVarGaussian.cxx @@ -187,7 +187,11 @@ double RooMultiVarGaussian::evaluate() const return exp(-0.5*alpha) ; } - +void RooMultiVarGaussian::translate(RooFit::Detail::CodeSquashContext &ctx) const +{ + std::span covISpan{_covI.GetMatrixArray(), static_cast(_covI.GetNoElements())}; + ctx.addResult(this, ctx.buildCall("RooFit::Detail::MathFuncs::multiVarGaussian", _x.size(), _x, _mu, covISpan)); +} //////////////////////////////////////////////////////////////////////////////// @@ -310,6 +314,19 @@ double RooMultiVarGaussian::analyticalIntegral(Int_t code, const char* /*rangeNa } +std::string RooMultiVarGaussian::buildCallToAnalyticIntegral(Int_t code, const char *rangeName, + RooFit::Detail::CodeSquashContext & /*ctx*/) const +{ + if (code != -1) { + std::stringstream errorMsg; + errorMsg << "Partial integrals over RooMultiVarGaussian are not supported."; + coutE(Minimization) << errorMsg.str() << std::endl; + throw std::runtime_error(errorMsg.str().c_str()); + } + + return std::to_string(analyticalIntegral(code, rangeName)); +} + //////////////////////////////////////////////////////////////////////////////// /// Check if cache entry was previously created diff --git a/roofit/roofitcore/src/RooProfileLL.cxx b/roofit/roofitcore/src/RooProfileLL.cxx index ad01d3ef896cf..ae140d0faeb08 100644 --- a/roofit/roofitcore/src/RooProfileLL.cxx +++ b/roofit/roofitcore/src/RooProfileLL.cxx @@ -30,8 +30,6 @@ as a MIGRAD minimization step is executed for each function evaluation #include "RooMsgService.h" #include "RooRealVar.h" -using std::endl; - ClassImp(RooProfileLL); @@ -121,7 +119,7 @@ RooFit::OwningPtr RooProfileLL::createProfile(const RooArgSet& param void RooProfileLL::initializeMinimizer() const { - coutI(Minimization) << "RooProfileLL::evaluate(" << GetName() << ") Creating instance of MINUIT" << endl ; + coutI(Minimization) << "RooProfileLL::evaluate(" << GetName() << ") Creating 
instance of MINUIT" << std::endl; bool smode = RooMsgService::instance().silentMode() ; RooMsgService::instance().setSilentMode(true) ; @@ -192,7 +190,7 @@ void RooProfileLL::validateAbsMin() const if (_paramFixed[par->GetName()] != par->isConstant()) { cxcoutI(Minimization) << "RooProfileLL::evaluate(" << GetName() << ") constant status of parameter " << par->GetName() << " has changed from " << (_paramFixed[par->GetName()]?"fixed":"floating") << " to " << (par->isConstant()?"fixed":"floating") - << ", recalculating absolute minimum" << endl ; + << ", recalculating absolute minimum" << std::endl; _absMinValid = false ; break ; } @@ -203,7 +201,7 @@ void RooProfileLL::validateAbsMin() const // If we don't have the absolute minimum w.r.t all observables, calculate that first if (!_absMinValid) { - cxcoutI(Minimization) << "RooProfileLL::evaluate(" << GetName() << ") determining minimum likelihood for current configurations w.r.t all observable" << endl ; + cxcoutI(Minimization) << "RooProfileLL::evaluate(" << GetName() << ") determining minimum likelihood for current configurations w.r.t all observable" << std::endl; if (!_minimizer) { @@ -235,7 +233,7 @@ void RooProfileLL::validateAbsMin() const _paramAbsMin.removeAll() ; // Only store non-constant parameters here! - _paramAbsMin.addClone(*std::unique_ptr{static_cast(_par.selectByAttrib("Constant",false))}); + _paramAbsMin.addClone(*std::unique_ptr{_par.selectByAttrib("Constant", false)}); _obsAbsMin.addClone(_obs) ; @@ -253,7 +251,7 @@ void RooProfileLL::validateAbsMin() const << static_cast(arg)->getVal() ; first=false ; } - ccxcoutI(Minimization) << ")" << endl ; + ccxcoutI(Minimization) << ")" << std::endl; } // Restore original parameter values diff --git a/roofit/roofitcore/src/RooRealMPFE.cxx b/roofit/roofitcore/src/RooRealMPFE.cxx index daa6ae53cb357..b2d40b640ca4b 100644 --- a/roofit/roofitcore/src/RooRealMPFE.cxx +++ b/roofit/roofitcore/src/RooRealMPFE.cxx @@ -169,7 +169,7 @@ void RooRealMPFE::initVars() // Retrieve non-constant parameters auto vars = _arg->getParameters(RooArgSet()); - //RooArgSet* ncVars = (RooArgSet*) vars->selectByAttrib("Constant",false) ; + // RooArgSet *ncVars = vars->selectByAttrib("Constant", false); RooArgList varList(*vars) ; // Save in lists diff --git a/roofit/roofitcore/src/RooSimultaneous.cxx b/roofit/roofitcore/src/RooSimultaneous.cxx index 148f3f7c614f1..1d736f3a29b30 100644 --- a/roofit/roofitcore/src/RooSimultaneous.cxx +++ b/roofit/roofitcore/src/RooSimultaneous.cxx @@ -422,10 +422,6 @@ bool RooSimultaneous::addPdf(const RooAbsPdf& pdf, const char* catLabel) return false ; } - - - - //////////////////////////////////////////////////////////////////////////////// /// Examine the pdf components and check if one of them can be extended or must be extended. 
/// It is enough to have one component that can be extended or must be extended to return the flag in @@ -433,39 +429,16 @@ bool RooSimultaneous::addPdf(const RooAbsPdf& pdf, const char* catLabel) RooAbsPdf::ExtendMode RooSimultaneous::extendMode() const { - bool anyCanExtend(false) ; - bool anyMustExtend(false) ; - - for (Int_t i=0 ; i<_numPdf ; i++) { - RooRealProxy* proxy = static_cast(_pdfProxyList.At(i)); - if (proxy) { - RooAbsPdf* pdf = static_cast(proxy->absArg()) ; - //cout << " now processing pdf " << pdf->GetName() << endl; - if (pdf->canBeExtended()) { - //cout << "RooSim::extendedMode(" << GetName() << ") component " << pdf->GetName() << " can be extended" - // << endl; - anyCanExtend = true; - } - if (pdf->mustBeExtended()) { - //cout << "RooSim::extendedMode(" << GetName() << ") component " << pdf->GetName() << " MUST be extended" << endl; - anyMustExtend = true; - } - } - } - if (anyMustExtend) { - //cout << "RooSim::extendedMode(" << GetName() << ") returning MustBeExtended" << endl ; - return MustBeExtended ; - } - if (anyCanExtend) { - //cout << "RooSim::extendedMode(" << GetName() << ") returning CanBeExtended" << endl ; - return CanBeExtended ; - } - //cout << "RooSim::extendedMode(" << GetName() << ") returning CanNotBeExtended" << endl ; - return CanNotBeExtended ; -} - - + bool anyCanExtend = false; + for (auto *proxy : static_range_cast(_pdfProxyList)) { + auto &pdf = static_cast(proxy->arg()); + if (pdf.mustBeExtended()) + return MustBeExtended; + anyCanExtend |= pdf.canBeExtended(); + } + return anyCanExtend ? CanBeExtended : CanNotBeExtended; +} //////////////////////////////////////////////////////////////////////////////// /// Return the current value: @@ -473,30 +446,43 @@ RooAbsPdf::ExtendMode RooSimultaneous::extendMode() const double RooSimultaneous::evaluate() const { - // Retrieve the proxy by index name - RooRealProxy* proxy = static_cast(_pdfProxyList.FindObject(_indexCat.label())) ; - - //assert(proxy!=0) ; - if (proxy==nullptr) return 0 ; - - // Calculate relative weighting factor for sim-pdfs of all extendable components - double catFrac(1) ; - if (canBeExtended()) { - double nEvtCat = (static_cast(proxy->absArg()))->expectedEvents(_normSet) ; - - double nEvtTot(0) ; - for(auto * proxy2 : static_range_cast(_pdfProxyList)) { - nEvtTot += (static_cast(proxy2->absArg()))->expectedEvents(_normSet) ; - } - catFrac=nEvtCat/nEvtTot ; - } + // Retrieve the proxy by index name + RooRealProxy *proxy = static_cast(_pdfProxyList.FindObject(_indexCat.label())); + + double nEvtTot = 1.0; + double nEvtCat = 1.0; + + // Calculate relative weighting factor for sim-pdfs of all extendable components + if (canBeExtended()) { + + nEvtTot = 0; + nEvtCat = 0; + + for (auto *proxy2 : static_range_cast(_pdfProxyList)) { + auto &pdf2 = static_cast(proxy2->arg()); + if(!pdf2.canBeExtended()) { + // If one of the pdfs can't be expected, reset the normalization + // factor to one and break out of the loop. + nEvtTot = 1.0; + nEvtCat = 1.0; + break; + } + const double nEvt = pdf2.expectedEvents(_normSet); + nEvtTot += nEvt; + if (proxy == proxy2) { + // Matching by proxy by pointer rather than pdfs, because it's + // possible to have the same pdf used in different states. 
+ nEvtCat += nEvt; + } + } + } + double catFrac = nEvtCat / nEvtTot; - // Return the selected PDF value, normalized by the number of index states - return (static_cast(proxy->absArg()))->getVal(_normSet)*catFrac ; + // Return the selected PDF value, normalized by the relative number of + // expected events if applicable. + return *proxy * catFrac; } - - //////////////////////////////////////////////////////////////////////////////// /// Return the number of expected events: If the index is in nset, /// then return the sum of the expected events of all components, @@ -1226,8 +1212,8 @@ RooSimultaneous::compileForNormSet(RooArgSet const &normSet, RooFit::Detail::Com markObs(pdfClone.get(), prefix, normSet); - std::unique_ptr pdfNormSet( - static_cast(std::unique_ptr(pdfClone->getVariables())->selectByAttrib("__obs__", true))); + std::unique_ptr pdfNormSet{ + std::unique_ptr(pdfClone->getVariables())->selectByAttrib("__obs__", true)}; if (rangeName) { pdfClone->setNormRange(RooHelpers::getRangeNameForSimComponent(rangeName, splitRange, catName).c_str()); diff --git a/roofit/roofitcore/src/RooTreeDataStore.cxx b/roofit/roofitcore/src/RooTreeDataStore.cxx index 201238466aefc..fee5b6a06f128 100644 --- a/roofit/roofitcore/src/RooTreeDataStore.cxx +++ b/roofit/roofitcore/src/RooTreeDataStore.cxx @@ -953,7 +953,7 @@ void RooTreeDataStore::cacheArgs(const RooAbsArg* owner, RooArgSet& newVarSet, c _cacheOwner = owner ; - std::unique_ptr constExprVarSet{static_cast(newVarSet.selectByAttrib("ConstantExpression",true))}; + std::unique_ptr constExprVarSet{newVarSet.selectByAttrib("ConstantExpression", true)}; bool doTreeFill = (_cachedVars.empty()) ; diff --git a/roofit/roofitcore/src/RooUniformBinning.cxx b/roofit/roofitcore/src/RooUniformBinning.cxx index be0dcfff43f8b..4857f0af3219e 100644 --- a/roofit/roofitcore/src/RooUniformBinning.cxx +++ b/roofit/roofitcore/src/RooUniformBinning.cxx @@ -25,10 +25,12 @@ is 'elastic': if the range changes the binning will change accordingly, unlike e.g. the binning of class RooBinning. 
**/ -#include "RooUniformBinning.h" -#include "RooMsgService.h" +#include -#include "Riostream.h" +#include +#include + +#include using std::endl; @@ -163,4 +165,8 @@ double* RooUniformBinning::array() const return _array.data(); } - +std::string +RooUniformBinning::translateBinNumber(RooFit::Detail::CodeSquashContext &ctx, RooAbsArg const &var, int coef) const +{ + return ctx.buildCall("RooFit::Detail::MathFuncs::uniformBinNumber", lowBound(), highBound(), var, numBins(), coef); +} diff --git a/roofit/roofitcore/src/TestStatistics/ConstantTermsOptimizer.cxx b/roofit/roofitcore/src/TestStatistics/ConstantTermsOptimizer.cxx index 8c6bed79f112c..a6c35fff8dfad 100644 --- a/roofit/roofitcore/src/TestStatistics/ConstantTermsOptimizer.cxx +++ b/roofit/roofitcore/src/TestStatistics/ConstantTermsOptimizer.cxx @@ -74,7 +74,7 @@ void ConstantTermsOptimizer::enableConstantTermsOptimization(RooAbsReal *functio arg->setCacheAndTrackHints(trackNodes); } // Do not set CacheAndTrack on constant expressions - std::unique_ptr constNodes{static_cast(trackNodes.selectByAttrib("Constant", true))}; + std::unique_ptr constNodes{trackNodes.selectByAttrib("Constant", true)}; trackNodes.remove(*constNodes); // Set CacheAndTrack flag on all remaining nodes @@ -97,8 +97,7 @@ void ConstantTermsOptimizer::enableConstantTermsOptimization(RooAbsReal *functio cacheArg->setOperMode(RooAbsArg::AClean); } - std::unique_ptr constNodes{ - static_cast(cached_nodes.selectByAttrib("ConstantExpressionCached", true))}; + std::unique_ptr constNodes{cached_nodes.selectByAttrib("ConstantExpressionCached", true)}; RooArgSet actualTrackNodes(cached_nodes); actualTrackNodes.remove(*constNodes); if (!constNodes->empty()) { diff --git a/roofit/roofitcore/src/TestStatistics/buildLikelihood.cxx b/roofit/roofitcore/src/TestStatistics/buildLikelihood.cxx index 9848aa840cad4..c9700c8afd093 100644 --- a/roofit/roofitcore/src/TestStatistics/buildLikelihood.cxx +++ b/roofit/roofitcore/src/TestStatistics/buildLikelihood.cxx @@ -52,7 +52,7 @@ namespace RooFit { * * The coupling of all these classes to RooMinimizer is made via the MinuitFcnGrad class, which owns the Wrappers that * calculate the likelihood components. - * + * * More extensive documentation is available at * https://github.com/root-project/root/blob/master/roofit/doc/developers/test_statistics.md */ @@ -97,7 +97,8 @@ RooArgSet getConstraintsSet(RooAbsPdf *pdf, RooAbsData *data, RooArgSet constrai global_observables.removeAll(); } std::unique_ptr allVars{pdf->getVariables()}; - global_observables.add(*dynamic_cast(allVars->selectByAttrib(global_observables_tag.c_str(), true))); + global_observables.add( + *std::unique_ptr{allVars->selectByAttrib(global_observables_tag.c_str(), true)}); oocoutI(nullptr, Minimization) << "User-defined specification of global observables definition with tag named '" << global_observables_tag << "'" << std::endl; } else if (global_observables.empty()) { @@ -109,7 +110,7 @@ RooArgSet getConstraintsSet(RooAbsPdf *pdf, RooAbsData *data, RooArgSet constrai << "p.d.f. 
provides built-in specification of global observables definition with tag named '" << defGlobObsTag << "'" << std::endl; std::unique_ptr allVars{pdf->getVariables()}; - global_observables.add(*dynamic_cast(allVars->selectByAttrib(defGlobObsTag, true))); + global_observables.add(*std::unique_ptr{allVars->selectByAttrib(defGlobObsTag, true)}); } } diff --git a/roofit/roofitcore/test/stressRooFit_tests.h b/roofit/roofitcore/test/stressRooFit_tests.h index 4be3413d21cb8..3d1ed7fe83305 100644 --- a/roofit/roofitcore/test/stressRooFit_tests.h +++ b/roofit/roofitcore/test/stressRooFit_tests.h @@ -85,7 +85,9 @@ using namespace RooFit; class TestBasic101 : public RooUnitTest { public: TestBasic101(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Fitting,plotting & event generation of basic p.d.f", refFile, writeRef, verbose){}; + : RooUnitTest("Fitting,plotting & event generation of basic p.d.f", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -145,7 +147,9 @@ class TestBasic101 : public RooUnitTest { class TestBasic102 : public RooUnitTest { public: TestBasic102(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Data import methods", refFile, writeRef, verbose){}; + : RooUnitTest("Data import methods", refFile, writeRef, verbose) + { + } std::unique_ptr makeTH1() { @@ -276,7 +280,9 @@ class TestBasic102 : public RooUnitTest { class TestBasic103 : public RooUnitTest { public: TestBasic103(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Interpreted expression p.d.f.", refFile, writeRef, verbose){}; + : RooUnitTest("Interpreted expression p.d.f.", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -360,7 +366,9 @@ class TestBasic103 : public RooUnitTest { class TestBasic105 : public RooUnitTest { public: TestBasic105(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("C++ function binding operator p.d.f", refFile, writeRef, verbose){}; + : RooUnitTest("C++ function binding operator p.d.f", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -423,7 +431,9 @@ class TestBasic105 : public RooUnitTest { class TestBasic108 : public RooUnitTest { public: TestBasic108(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Non-standard binning in counting and asymmetry plots", refFile, writeRef, verbose){}; + : RooUnitTest("Non-standard binning in counting and asymmetry plots", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -524,7 +534,9 @@ class TestBasic108 : public RooUnitTest { class TestBasic109 : public RooUnitTest { public: TestBasic109(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Calculation of chi^2 and residuals in plots", refFile, writeRef, verbose){}; + : RooUnitTest("Calculation of chi^2 and residuals in plots", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -596,7 +608,9 @@ class TestBasic109 : public RooUnitTest { class TestBasic110 : public RooUnitTest { public: TestBasic110(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Normalization of p.d.f.s in 1D", refFile, writeRef, verbose){}; + : RooUnitTest("Normalization of p.d.f.s in 1D", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -659,7 +673,9 @@ class TestBasic110 : public RooUnitTest { class TestBasic111 : public RooUnitTest { public: TestBasic111(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Numeric integration configuration", refFile, writeRef, verbose){}; + : RooUnitTest("Numeric integration configuration", refFile, writeRef, verbose) + { + } 
bool testCode() override { @@ -728,7 +744,9 @@ class TestBasic111 : public RooUnitTest { class TestBasic201 : public RooUnitTest { public: TestBasic201(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Addition operator p.d.f.", refFile, writeRef, verbose){}; + : RooUnitTest("Addition operator p.d.f.", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -821,7 +839,9 @@ class TestBasic201 : public RooUnitTest { class TestBasic202 : public RooUnitTest { public: TestBasic202(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Extended ML fits to addition operator p.d.f.s", refFile, writeRef, verbose){}; + : RooUnitTest("Extended ML fits to addition operator p.d.f.s", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -910,7 +930,9 @@ class TestBasic202 : public RooUnitTest { class TestBasic203 : public RooUnitTest { public: TestBasic203(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Basic fitting and plotting in ranges", refFile, writeRef, verbose){}; + : RooUnitTest("Basic fitting and plotting in ranges", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -971,7 +993,9 @@ class TestBasic203 : public RooUnitTest { class TestBasic204 : public RooUnitTest { public: TestBasic204(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Extended ML fit in sub range", refFile, writeRef, verbose){}; + : RooUnitTest("Extended ML fit in sub range", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1035,7 +1059,9 @@ class TestBasic204 : public RooUnitTest { class TestBasic205 : public RooUnitTest { public: TestBasic205(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Component plotting variations", refFile, writeRef, verbose){}; + : RooUnitTest("Component plotting variations", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1127,7 +1153,9 @@ class TestBasic205 : public RooUnitTest { class TestBasic208 : public RooUnitTest { public: TestBasic208(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("FFT Convolution operator p.d.f.", refFile, writeRef, verbose){}; + : RooUnitTest("FFT Convolution operator p.d.f.", refFile, writeRef, verbose) + { + } bool isTestAvailable() override { @@ -1213,7 +1241,9 @@ class TestBasic208 : public RooUnitTest { class TestBasic209 : public RooUnitTest { public: TestBasic209(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Analytical convolution operator", refFile, writeRef, verbose){}; + : RooUnitTest("Analytical convolution operator", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1279,7 +1309,9 @@ class TestBasic209 : public RooUnitTest { class TestBasic301 : public RooUnitTest { public: TestBasic301(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Composition extension of basic p.d.f", refFile, writeRef, verbose){}; + : RooUnitTest("Composition extension of basic p.d.f", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1332,7 +1364,9 @@ class TestBasic301 : public RooUnitTest { class TestBasic302 : public RooUnitTest { public: TestBasic302(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Sum and product utility functions", refFile, writeRef, verbose){}; + : RooUnitTest("Sum and product utility functions", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1433,7 +1467,9 @@ class TestBasic303 : public RooUnitTest { } TestBasic303(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Conditional use of F(x|y)", refFile, writeRef, 
verbose){}; + : RooUnitTest("Conditional use of F(x|y)", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1499,7 +1535,9 @@ class TestBasic303 : public RooUnitTest { class TestBasic304 : public RooUnitTest { public: TestBasic304(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Product operator p.d.f. with uncorrelated terms", refFile, writeRef, verbose){}; + : RooUnitTest("Product operator p.d.f. with uncorrelated terms", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1553,7 +1591,9 @@ class TestBasic304 : public RooUnitTest { class TestBasic305 : public RooUnitTest { public: TestBasic305(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Product operator p.d.f. with conditional term", refFile, writeRef, verbose){}; + : RooUnitTest("Product operator p.d.f. with conditional term", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1617,7 +1657,9 @@ class TestBasic305 : public RooUnitTest { class TestBasic306 : public RooUnitTest { public: TestBasic306(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Conditional use of per-event error p.d.f. F(t|dt)", refFile, writeRef, verbose){}; + : RooUnitTest("Conditional use of per-event error p.d.f. F(t|dt)", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1693,7 +1735,9 @@ class TestBasic306 : public RooUnitTest { class TestBasic307 : public RooUnitTest { public: TestBasic307(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Full per-event error p.d.f. F(t|dt)G(dt)", refFile, writeRef, verbose){}; + : RooUnitTest("Full per-event error p.d.f. F(t|dt)G(dt)", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1765,7 +1809,9 @@ class TestBasic307 : public RooUnitTest { class TestBasic308 : public RooUnitTest { public: TestBasic308(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Normalization of p.d.f.s in 2D", refFile, writeRef, verbose){}; + : RooUnitTest("Normalization of p.d.f.s in 2D", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1842,7 +1888,9 @@ class TestBasic308 : public RooUnitTest { class TestBasic310 : public RooUnitTest { public: TestBasic310(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Data and p.d.f projection in category slice", refFile, writeRef, verbose){}; + : RooUnitTest("Data and p.d.f projection in category slice", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1917,7 +1965,9 @@ class TestBasic310 : public RooUnitTest { class TestBasic311 : public RooUnitTest { public: TestBasic311(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Data and p.d.f projection in sub range", refFile, writeRef, verbose){}; + : RooUnitTest("Data and p.d.f projection in sub range", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -1984,7 +2034,9 @@ class TestBasic311 : public RooUnitTest { class TestBasic312 : public RooUnitTest { public: TestBasic312(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Fit in multiple rectangular ranges", refFile, writeRef, verbose){}; + : RooUnitTest("Fit in multiple rectangular ranges", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -2076,7 +2128,9 @@ class TestBasic312 : public RooUnitTest { class TestBasic313 : public RooUnitTest { public: TestBasic313(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Integration over non-rectangular regions", refFile, writeRef, verbose){}; + : RooUnitTest("Integration over non-rectangular regions", refFile, writeRef, 
verbose) + { + } bool testCode() override { @@ -2143,7 +2197,9 @@ class TestBasic313 : public RooUnitTest { class TestBasic314 : public RooUnitTest { public: TestBasic314(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Fit with non-rectangular observable boundaries", refFile, writeRef, verbose){}; + : RooUnitTest("Fit with non-rectangular observable boundaries", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -2199,7 +2255,9 @@ class TestBasic314 : public RooUnitTest { class TestBasic315 : public RooUnitTest { public: TestBasic315(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("P.d.f. marginalization through integration", refFile, writeRef, verbose){}; + : RooUnitTest("P.d.f. marginalization through integration", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -2261,7 +2319,9 @@ class TestBasic315 : public RooUnitTest { class TestBasic316 : public RooUnitTest { public: TestBasic316(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Likelihood ratio projection plot", refFile, writeRef, verbose){}; + : RooUnitTest("Likelihood ratio projection plot", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -2350,7 +2410,9 @@ class TestBasic316 : public RooUnitTest { class TestBasic402 : public RooUnitTest { public: TestBasic402(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Basic operations on datasets", refFile, writeRef, verbose){}; + : RooUnitTest("Basic operations on datasets", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -2442,7 +2504,9 @@ class TestBasic402 : public RooUnitTest { class TestBasic403 : public RooUnitTest { public: TestBasic403(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Fits with weighted datasets", refFile, writeRef, verbose){}; + : RooUnitTest("Fits with weighted datasets", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -2559,7 +2623,9 @@ class TestBasic403 : public RooUnitTest { class TestBasic404 : public RooUnitTest { public: TestBasic404(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Categories basic functionality", refFile, writeRef, verbose){}; + : RooUnitTest("Categories basic functionality", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -2636,7 +2702,9 @@ class TestBasic404 : public RooUnitTest { class TestBasic405 : public RooUnitTest { public: TestBasic405(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Real-to-category functions", refFile, writeRef, verbose){}; + : RooUnitTest("Real-to-category functions", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -2723,7 +2791,9 @@ class TestBasic405 : public RooUnitTest { class TestBasic406 : public RooUnitTest { public: TestBasic406(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Category-to-category functions", refFile, writeRef, verbose){}; + : RooUnitTest("Category-to-category functions", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -2798,7 +2868,9 @@ class TestBasic406 : public RooUnitTest { class TestBasic501 : public RooUnitTest { public: TestBasic501(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Simultaneous p.d.f. operator", refFile, writeRef, verbose){}; + : RooUnitTest("Simultaneous p.d.f. 
operator", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -2910,7 +2982,9 @@ class TestBasic501 : public RooUnitTest { class TestBasic599 : public RooUnitTest { public: TestBasic599(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Workspace and p.d.f. persistence", refFile, writeRef, verbose){}; + : RooUnitTest("Workspace and p.d.f. persistence", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3114,7 +3188,9 @@ class TestBasic599 : public RooUnitTest { class TestBasic602 : public RooUnitTest { public: TestBasic602(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Chi2 minimization", refFile, writeRef, verbose){}; + : RooUnitTest("Chi2 minimization", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3173,7 +3249,9 @@ class TestBasic602 : public RooUnitTest { class TestBasic604 : public RooUnitTest { public: TestBasic604(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Auxiliary observable constraints", refFile, writeRef, verbose){}; + : RooUnitTest("Auxiliary observable constraints", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3239,7 +3317,9 @@ class TestBasic604 : public RooUnitTest { class TestBasic605 : public RooUnitTest { public: TestBasic605(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Profile Likelihood operator", refFile, writeRef, verbose){}; + : RooUnitTest("Profile Likelihood operator", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3320,7 +3400,9 @@ class TestBasic605 : public RooUnitTest { class TestBasic606 : public RooUnitTest { public: TestBasic606(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("NLL error handling", refFile, writeRef, verbose){}; + : RooUnitTest("NLL error handling", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3380,7 +3462,9 @@ class TestBasic606 : public RooUnitTest { class TestBasic607 : public RooUnitTest { public: TestBasic607(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Fit Result functionality", refFile, writeRef, verbose){}; + : RooUnitTest("Fit Result functionality", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3451,7 +3535,9 @@ class TestBasic607 : public RooUnitTest { class TestBasic609 : public RooUnitTest { public: TestBasic609(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Chi^2 fit to X-Y dataset", refFile, writeRef, verbose){}; + : RooUnitTest("Chi^2 fit to X-Y dataset", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3521,7 +3607,9 @@ class TestBasic609 : public RooUnitTest { class TestBasic701 : public RooUnitTest { public: TestBasic701(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Efficiency operator p.d.f. 1D", refFile, writeRef, verbose){}; + : RooUnitTest("Efficiency operator p.d.f. 1D", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3591,7 +3679,9 @@ class TestBasic701 : public RooUnitTest { class TestBasic702 : public RooUnitTest { public: TestBasic702(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Efficiency operator p.d.f. 2D", refFile, writeRef, verbose){}; + : RooUnitTest("Efficiency operator p.d.f. 
2D", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3673,7 +3763,9 @@ class TestBasic702 : public RooUnitTest { class TestBasic703 : public RooUnitTest { public: TestBasic703(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Efficiency product operator p.d.f", refFile, writeRef, verbose){}; + : RooUnitTest("Efficiency product operator p.d.f", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3737,7 +3829,9 @@ class TestBasic703 : public RooUnitTest { class TestBasic704 : public RooUnitTest { public: TestBasic704(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Amplitude sum operator p.d.f", refFile, writeRef, verbose){}; + : RooUnitTest("Amplitude sum operator p.d.f", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3817,7 +3911,9 @@ class TestBasic705 : public RooUnitTest { double ctol() override { return 5e-2; } // very conservative, this is a numerically difficult test TestBasic705(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Linear morph operator p.d.f.", refFile, writeRef, verbose){}; + : RooUnitTest("Linear morph operator p.d.f.", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3931,7 +4027,9 @@ class TestBasic705 : public RooUnitTest { class TestBasic706 : public RooUnitTest { public: TestBasic706(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Histogram based p.d.f.s", refFile, writeRef, verbose){}; + : RooUnitTest("Histogram based p.d.f.s", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -3988,7 +4086,9 @@ class TestBasic706 : public RooUnitTest { class TestBasic707 : public RooUnitTest { public: TestBasic707(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Kernel estimation p.d.f.s", refFile, writeRef, verbose){}; + : RooUnitTest("Kernel estimation p.d.f.s", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -4070,7 +4170,9 @@ class TestBasic707 : public RooUnitTest { class TestBasic708 : public RooUnitTest { public: TestBasic708(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("B Physics p.d.f.s", refFile, writeRef, verbose){}; + : RooUnitTest("B Physics p.d.f.s", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -4244,7 +4346,9 @@ class TestBasic708 : public RooUnitTest { class TestBasic801 : public RooUnitTest { public: TestBasic801(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("Automated MC studies", refFile, writeRef, verbose){}; + : RooUnitTest("Automated MC studies", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -4327,7 +4431,9 @@ class TestBasic801 : public RooUnitTest { class TestBasic802 : public RooUnitTest { public: TestBasic802(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("MC Study with chi^2 calculator", refFile, writeRef, verbose){}; + : RooUnitTest("MC Study with chi^2 calculator", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -4411,7 +4517,9 @@ class TestBasic802 : public RooUnitTest { class TestBasic803 : public RooUnitTest { public: TestBasic803(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("MC Study with param rand. and Z calc", refFile, writeRef, verbose){}; + : RooUnitTest("MC Study with param rand. and Z calc", refFile, writeRef, verbose) + { + } bool testCode() override { @@ -4506,7 +4614,9 @@ class TestBasic803 : public RooUnitTest { class TestBasic804 : public RooUnitTest { public: TestBasic804(TFile *refFile, bool writeRef, int verbose) - : RooUnitTest("MC Studies with aux. 
obs. constraints", refFile, writeRef, verbose){}; + : RooUnitTest("MC Studies with aux. obs. constraints", refFile, writeRef, verbose) + { + } double htol() override { return 0.1; } // numerically very difficult test diff --git a/roofit/roofitcore/test/testGlobalObservables.cxx b/roofit/roofitcore/test/testGlobalObservables.cxx index 300d69f81c9e2..ecf006580f818 100644 --- a/roofit/roofitcore/test/testGlobalObservables.cxx +++ b/roofit/roofitcore/test/testGlobalObservables.cxx @@ -103,6 +103,7 @@ class GlobsTest : public testing::TestWithParam> } } + RooFit::EvalBackend const &evalBackend() { return _evalBackend; } RooWorkspace &ws() { return _ws; } RooDataSet &data() { return *_data; } RooDataSet &dataWithMeanSigmaGlobs() { return *_dataWithMeanSigmaGlobs; } @@ -323,7 +324,7 @@ TEST_P(GlobsTest, ResetDataToWrongData) wrongData->setGlobalObservables({gm, gs}); // check that the fit works when using the dataset with the correct values - std::unique_ptr nll{model.createNLL(dataWithMeanSigmaGlobs())}; + std::unique_ptr nll{model.createNLL(dataWithMeanSigmaGlobs(), EvalBackend(evalBackend()))}; auto res2 = minimize(model, *nll, dataWithMeanSigmaGlobs(), minimizerCfg()); EXPECT_TRUE(res1->isIdentical(*res2)) << "fitting an model with internal " "constraints in a RooPrdPdf gave a different result when global " @@ -362,7 +363,7 @@ TEST_P(GlobsTest, ResetDataToCorrectData) resetParameters(); // check that the fit doesn't work when using the dataset with the wrong values - std::unique_ptr nll{model.createNLL(*wrongData)}; + std::unique_ptr nll{model.createNLL(*wrongData, EvalBackend(evalBackend()))}; auto res2 = minimize(model, *nll, *wrongData, minimizerCfg()); EXPECT_TRUE(isNotIdentical(*res1, *res2)) << "fitting an model with internal " "constraints in a RooPrdPdf ignored the global " @@ -426,8 +427,8 @@ TEST_P(GlobsTest, ResetDataButSourceFromModel) resetParameters(); // check that the fit works when using the dataset with the correct values - std::unique_ptr nll{ - model.createNLL(dataWithMeanSigmaGlobs(), GlobalObservablesSource("model"), GlobalObservables(gm, gs))}; + std::unique_ptr nll{model.createNLL(dataWithMeanSigmaGlobs(), GlobalObservablesSource("model"), + GlobalObservables(gm, gs), EvalBackend(evalBackend()))}; auto res2 = minimize(model, *nll, dataWithMeanSigmaGlobs(), minimizerCfg()); EXPECT_TRUE(res1->isIdentical(*res2)); diff --git a/roofit/roofitcore/test/testRooSimultaneous.cxx b/roofit/roofitcore/test/testRooSimultaneous.cxx index 9852cacd14c1b..8e1bb49898f32 100644 --- a/roofit/roofitcore/test/testRooSimultaneous.cxx +++ b/roofit/roofitcore/test/testRooSimultaneous.cxx @@ -407,3 +407,52 @@ TEST(RooSimultaneous, ConditionalProdPdf) // RooSimultaneous, and one for the RooCategory. EXPECT_EQ(countGraphNodes(*compiledSim), countGraphNodes(*compiled) + 2); } + +// Test that we can evaluate a RooSimultaneous also if only a fraction of the +// channels can be extended. Also check if the likelihood can be created. 
+TEST(RooSimultaneous, PartiallyExtendedPdfs) +{ + RooWorkspace ws; + ws.factory("Gaussian::pdfA(x_a[-10, 10], mu_a[0, -10, 10], sigma_a[2.0, 0.1, 10.0])"); + ws.factory("Gaussian::pdfB(x_b[-10, 10], mu_b[0, -10, 10], sigma_b[2.0, 0.1, 10.0])"); + ws.factory("PROD::pdfAprod(pdfA)"); + ws.factory("ExtendPdf::pdfBext(pdfB, n_b[1000., 100., 10000.])"); + ws.factory("SIMUL::simPdf( cat[A=0,B=1], A=pdfAprod, B=pdfBext)"); + + RooArgSet observables{*ws.var("x_a"), *ws.var("x_b"), *ws.cat("cat")}; + + auto &simPdf = *ws.pdf("simPdf"); + std::cout << simPdf.getVal() << std::endl; + + // A completely extended pdf, just to easily create a toy dataset + ws.factory("ExtendPdf::pdfAext(pdfA, n_b[1000., 100., 10000.])"); + ws.factory("SIMUL::simPdfExtBoth( cat[A=0,B=1], A=pdfAext, B=pdfBext)"); + std::unique_ptr data{ws.pdf("simPdfExtBoth")->generate(observables)}; + + // Check if likelihood can be instantiated + std::unique_ptr nll{simPdf.createNLL(*data)}; +} + +// Make sure that one can use the same extended pdf instance for different +// channels, and the RooSimultaneous will still evaluate correctly. +TEST(RooSimultaneous, DuplicateExtendedPdfs) +{ + RooWorkspace ws; + + ws.factory("Uniform::u_a(x[0, 10])"); + ws.factory("Uniform::u_b(x)"); + ws.factory("ExtendPdf::pdf_a(u_a, n[1000, 100, 10000])"); + ws.factory("ExtendPdf::pdf_b(u_b, n)"); + + ws.factory("SIMUL::simPdf( c[A=0,B=1], A=pdf_a, B=pdf_a)"); + ws.factory("SIMUL::simPdfRef( c, A=pdf_a, B=pdf_b)"); + + RooArgSet normSet{*ws.var("x")}; + + RooAbsPdf &simPdf = *ws.pdf("simPdf"); + RooAbsPdf &simPdfRef = *ws.pdf("simPdfRef"); + double simPdfVal = simPdf.getVal(normSet); + + EXPECT_FLOAT_EQ(simPdfVal, 0.05); + EXPECT_DOUBLE_EQ(simPdfVal, simPdfRef.getVal(normSet)); +} diff --git a/roofit/roofitcore/test/testTestStatistics.cxx b/roofit/roofitcore/test/testTestStatistics.cxx index 15b5a22574f34..9902aede27b36 100644 --- a/roofit/roofitcore/test/testTestStatistics.cxx +++ b/roofit/roofitcore/test/testTestStatistics.cxx @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/roofit/roostats/src/HLFactory.cxx b/roofit/roostats/src/HLFactory.cxx index e2f041845d757..0025fa914e27a 100644 --- a/roofit/roostats/src/HLFactory.cxx +++ b/roofit/roostats/src/HLFactory.cxx @@ -50,7 +50,7 @@ HLFactory::HLFactory(const char *name, const char *fileName, bool isVerbose) { TString wsName(name); wsName += "_ws"; - fWs = new RooWorkspace(wsName, true); + fWs = new RooWorkspace(wsName); fSigBkgPdfNames.SetOwner(); fBkgPdfNames.SetOwner(); @@ -73,7 +73,7 @@ HLFactory::HLFactory(const char *name, RooWorkspace *externalWs, bool isVerbose) //////////////////////////////////////////////////////////////////////////////// -HLFactory::HLFactory() : TNamed("hlfactory", "hlfactory"), fWs(new RooWorkspace("hlfactory_ws", true)), fOwnWs(true) +HLFactory::HLFactory() : TNamed("hlfactory", "hlfactory"), fWs(new RooWorkspace("hlfactory_ws")), fOwnWs(true) { fSigBkgPdfNames.SetOwner(); fBkgPdfNames.SetOwner(); diff --git a/roofit/xroofit/CMakeLists.txt b/roofit/xroofit/CMakeLists.txt index 71dced555d2cf..ad951c77ea734 100644 --- a/roofit/xroofit/CMakeLists.txt +++ b/roofit/xroofit/CMakeLists.txt @@ -33,3 +33,5 @@ ROOT_STANDARD_LIBRARY_PACKAGE(RooFitXRooFit ) target_include_directories(RooFitXRooFit PRIVATE inc/RooFit) + +ROOT_ADD_TEST_SUBDIRECTORY(test) \ No newline at end of file diff --git a/roofit/xroofit/inc/RooFit/xRooFit/xRooFit.h b/roofit/xroofit/inc/RooFit/xRooFit/xRooFit.h index cb192ae4c3108..7d8a908e40d5d 100644 --- 
a/roofit/xroofit/inc/RooFit/xRooFit/xRooFit.h +++ b/roofit/xroofit/inc/RooFit/xRooFit/xRooFit.h @@ -66,6 +66,8 @@ class xRooFit { static RooCmdArg ReuseNLL(bool flag); // if should try to reuse the NLL object when it changes dataset static RooCmdArg Tolerance(double value); static RooCmdArg StrategySequence(const char *stratSeq); // control minimization strategy sequence + static RooCmdArg MaxIterations(int nIterations); + static constexpr double OBS = std::numeric_limits::quiet_NaN(); // Helper function for matching precision of a value and its error diff --git a/roofit/xroofit/inc/RooFit/xRooFit/xRooNLLVar.h b/roofit/xroofit/inc/RooFit/xRooFit/xRooNLLVar.h index df62227c2cba6..6879ea29b5727 100644 --- a/roofit/xroofit/inc/RooFit/xRooFit/xRooNLLVar.h +++ b/roofit/xroofit/inc/RooFit/xRooFit/xRooNLLVar.h @@ -38,7 +38,6 @@ class RooAbsReal; class RooAbsPdf; class RooAbsData; class RooAbsCollection; -class RooNLLVar; class RooConstraintSum; class RooRealVar; class RooCmdArg; @@ -93,6 +92,7 @@ class xRooNLLVar : public std::shared_ptr { class xRooFitResult : public std::shared_ptr { public: + xRooFitResult(const RooFitResult &fr); xRooFitResult(const std::shared_ptr &in, const std::shared_ptr &nll = nullptr); // : fNode(in) { } const RooFitResult *operator->() const; @@ -430,19 +430,27 @@ class xRooNLLVar : public std::shared_ptr { // total nll should be all these values + constraint term + extended term + simTerm [+binnedDataTerm if activated // binnedL option] - RooNLLVar *mainTerm() const; + /*RooAbsReal *mainTerm() const;*/ RooConstraintSum *constraintTerm() const; + double mainTermVal() const; + double constraintTermVal() const; + double getEntryVal(size_t entry) const; // get the Nll value for a specific entry - double extendedTerm() const; - double simTerm() const; - double binnedDataTerm() const; + double extendedTermVal() const; + double simTermVal() const; + double binnedDataTermVal() const; double getEntryBinWidth(size_t entry) const; double ndof() const; double saturatedVal() const; - double saturatedConstraintTerm() const; - double saturatedMainTerm() const; + [[deprecated("Use saturatedConstraintTermVal()")]] double saturatedConstraintTerm() const + { + return saturatedConstraintTermVal(); + } + double saturatedConstraintTermVal() const; + [[deprecated("Use saturatedMainTermVal()")]] double saturatedMainTerm() const { return saturatedMainTermVal(); } + double saturatedMainTermVal() const; double pgof() const; // a goodness-of-fit pvalue based on profile likelihood of a saturated model double mainTermPgof() const; double mainTermNdof() const; diff --git a/roofit/xroofit/inc/RooFit/xRooFit/xRooNode.h b/roofit/xroofit/inc/RooFit/xRooFit/xRooNode.h index 887cbbed54f25..52191ca549e13 100644 --- a/roofit/xroofit/inc/RooFit/xRooFit/xRooNode.h +++ b/roofit/xroofit/inc/RooFit/xRooFit/xRooNode.h @@ -304,7 +304,7 @@ class xRooNode : public TNamed, public std::vector> { xRooNode components() const; // additive children xRooNode factors() const; // multiplicative children xRooNode variations() const; // interpolated children (are bins a form of variation?) 
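Note on the xRooNLLVar.h hunk above: the term accessors are renamed to the *Val() spelling while the old names are kept as inline forwarders marked [[deprecated]], so existing call sites still compile but emit a compiler warning. A minimal sketch of that pattern in isolation (the class and method names are placeholders, not the real xRooNLLVar interface):

class Nll {
public:
   // New spelling of the accessor.
   double saturatedMainTermVal() const { return 1.0; }

   // Old spelling kept as a thin forwarder; callers get a deprecation
   // warning at compile time instead of a hard break.
   [[deprecated("Use saturatedMainTermVal()")]] double saturatedMainTerm() const
   {
      return saturatedMainTermVal();
   }
};

int main()
{
   Nll nll;
   return nll.saturatedMainTermVal() > 0 ? 0 : 1; // new call site, no warning
}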
- xRooNode coefs() const; + xRooNode coefs(bool recurse = false) const; xRooNode coords(bool setVals = true) const; // will move to the coords in the process if setVals=true xRooNode bins() const; @@ -379,17 +379,38 @@ class xRooNode : public TNamed, public std::vector> { bool SetXaxis(int nbins, double low, double high) { return SetXaxis("xaxis", "", nbins, low, high); } bool SetXaxis(int nbins, const double *bins) { return SetXaxis("xaxis", "", nbins, bins); } - std::shared_ptr style(TObject *initObject = nullptr, bool autoCreate = true) const; + std::shared_ptr + style(TObject *initObject = nullptr, bool autoCreate = true) const; // DEPRECATED: TO BE REMOVED + xRooNode styles(TObject *initObject = nullptr, bool autoCreate = true) const; TAxis *GetXaxis() const; double GetBinData(int bin, const xRooNode &data = "obsData"); double GetBinContent(int bin) const { return GetBinContents(bin, bin).at(0); } std::vector GetBinContents(int binStart = 1, int binEnd = 0) const; // default will get all bins - double GetBinError(int bin, const xRooNode &fr = "") const; - std::vector GetBinErrors(int binStart = 1, int binEnd = 0, const xRooNode &fr = "") const; + double + GetBinError(int bin, const xRooNode &fr = "", int nToys = 0, bool errorsHi = false, bool errorsLo = false) const; + std::vector GetBinErrors(int binStart = 1, int binEnd = 0, const xRooNode &fr = "", int nToys = 0, + bool errorsHi = false, bool errorsLo = false) const; std::pair IntegralAndError(const xRooNode &fr = "", const char *rangeName = nullptr) const; + std::vector GetBinErrorsHi(int binStart = 1, int binEnd = 0, const xRooNode &fr = "", int nToys = 0) const + { + return GetBinErrors(binStart, binEnd, fr, nToys, true, false); + } + std::vector GetBinErrorsLo(int binStart = 1, int binEnd = 0, const xRooNode &fr = "", int nToys = 0) const + { + return GetBinErrors(binStart, binEnd, fr, nToys, false, true); + } + double GetBinErrorHi(int bin, const xRooNode &fr = "", int nToys = 0) const + { + return GetBinError(bin, fr, nToys, true, false); + } + double GetBinErrorLo(int bin, const xRooNode &fr = "", int nToys = 0) const + { + return GetBinError(bin, fr, nToys, false, true); + } + // methods to access default content and error double GetContent() const { return GetBinContent(fBinNumber); } double GetError(const xRooNode &fr = "") const @@ -401,11 +422,15 @@ class xRooNode : public TNamed, public std::vector> { // methods to access content and covariances of the CHILDREN of a node std::vector contents() const; TMatrixDSym covariances(const xRooNode &fr = "") const; - xRooNLLVar nll(const xRooNode &_data, std::initializer_list nllOpts) const; xRooNLLVar nll(const xRooNode &_data, const RooLinkedList &nllOpts) const; xRooNLLVar nll(const xRooNode &_data = "") const; // uses xRooFit::createNLLOption for nllOpts + xRooNLLVar + nll(const char *_data, + std::initializer_list nllOpts = {}) const; // exists to have sensible exception reporting in python + // (rather than conversion errors, which are incorrect) + xRooNode fitResult(const char *opt = "") const; // todo: make this 'fitResults' void SetFitResult(const RooFitResult *fr = nullptr); // null means will load prefit void SetFitResult(const std::shared_ptr &fr) { SetFitResult(fr.get()); } @@ -443,13 +468,16 @@ class xRooNode : public TNamed, public std::vector> { void SetChecked(bool val = true) { Checked(this, val); } /** @private */ - xRooNode histo(const xRooNode &vars = "x", const xRooNode &fr = "", bool content = true, bool errors = true) const; + xRooNode histo(const 
xRooNode &vars = "x", const xRooNode &fr = "", bool content = true, bool errors = true, + bool stack = true, bool errorsHi = false, bool errorsLo = false, int nErrorToys = 0) const; /** @private */ xRooNode filter(const xRooNode &range) const; TGraph *BuildGraph(RooAbsLValue *v = nullptr, bool includeZeros = false, TVirtualPad *fromPad = nullptr) const; TH1 *BuildHistogram(RooAbsLValue *v = nullptr, bool empty = false, bool errors = false, int binStart = 1, - int binEnd = 0, const xRooNode &fr = "") const; + int binEnd = 0, const xRooNode &fr = "", bool errorsHi = false, bool errorsLo = false, + int nErrorToys = 0, TH1 *templateHist = nullptr, bool nostack = true, + bool setInterp = false) const; xRooNode mainChild() const; void Draw(Option_t *opt = "") override; // *MENU* diff --git a/roofit/xroofit/src/xRooFit.cxx b/roofit/xroofit/src/xRooFit.cxx index d652fcbc3271b..98b32c666a8bc 100644 --- a/roofit/xroofit/src/xRooFit.cxx +++ b/roofit/xroofit/src/xRooFit.cxx @@ -52,7 +52,6 @@ #include "TGraphErrors.h" #include "TLegend.h" #include "TKey.h" -#include "../../roofitcore/src/RooAbsTestStatistic.h" #include "TPRegexp.h" #include "RooStringVar.h" @@ -94,6 +93,11 @@ RooCmdArg xRooFit::StrategySequence(const char *val) return RooCmdArg("StrategySequence", 0, 0, 0, 0, val); } +RooCmdArg xRooFit::MaxIterations(int val) +{ + return RooCmdArg("MaxIterations", val); +} + xRooNLLVar xRooFit::createNLL(const std::shared_ptr pdf, const std::shared_ptr data, const RooLinkedList &nllOpts) { @@ -628,7 +632,7 @@ class ProgressMonitor : public RooAbsReal { // doing a hesse step, estimate progress based on evaluations int nRequired = prevPars.size(); if (nRequired > 1) { - nRequired *= (nRequired - 1) / 2; + nRequired *= nRequired; if (fState == "Hesse3") { nRequired *= 4; } @@ -1133,9 +1137,8 @@ std::shared_ptr xRooFit::minimize(RooAbsReal &nll, // } // only do hesse if was a valid min and not full accurate cov matrix already (can happen if e.g. ran strat2) - if (hesse && - (m_strategy(sIdx) == 'h' || ((strategy < 2 || _minimizer.fitter()->GetMinimizer()->CovMatrixStatus() != 3) && - _minimizer.fitter()->Result().IsValid()))) { + if (hesse && m_hessestrategy.Length() != 0 && + (m_strategy(sIdx) == 'h' || (_minimizer.fitter()->Result().IsValid()))) { // Note: minima where the covariance was made posdef are deemed 'valid' ... 
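Note on the ProgressMonitor change above: the number of likelihood evaluations a Hesse step needs for n floating parameters is now estimated as roughly n*n (previously n*(n-1)/2), with an extra factor of 4 in the "Hesse3" state. A small sketch of that estimate, assuming this reading of the hunk (the function name is illustrative only):

#include <cstddef>

std::size_t estimatedHesseCalls(std::size_t nFloatPars, bool hesse3 = false)
{
   std::size_t nRequired = nFloatPars;
   if (nRequired > 1) {
      nRequired *= nRequired; // was nRequired * (nRequired - 1) / 2 before the patch
      if (hesse3) {
         nRequired *= 4;
      }
   }
   return nRequired; // progress fraction would then be nEvaluations / nRequired
}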
@@ -1171,6 +1174,19 @@ std::shared_ptr xRooFit::minimize(RooAbsReal &nll, } while (sIdx != -1) { hesseStrategy = int(m_hessestrategy(sIdx) - '0'); + + if (strategy == 2 && hesseStrategy == 2) { + // don't repeat hesse if strategy=2 and hesseStrategy=2, and the matrix was valid + if (_minimizer.fitter()->GetMinimizer()->CovMatrixStatus() == 3) { + break; + } + if (sIdx >= m_hessestrategy.Length() - 1) { + break; // run out of strategies to try, stop + } + sIdx++; + continue; + } + _minimizer.fitter()->Config().MinimizerOptions().SetStrategy(hesseStrategy); // const_cast(_minimizer.fitter()->Config().MinimizerOptions().ExtraOptions())->SetValue("HessianStepTolerance",0.1); // const_cast(_minimizer.fitter()->Config().MinimizerOptions().ExtraOptions())->SetValue("HessianG2Tolerance",0.02); @@ -1241,6 +1257,7 @@ std::shared_ptr xRooFit::minimize(RooAbsReal &nll, if (std::unique_ptr mpars(floatPars->selectByAttrib("minos", true)); !mpars->empty()) { if (auto fff = dynamic_cast(_nll); fff) { fff->fState = "Minos"; + fff->counter2 = 0; } auto _status = _minimizer.minos(*mpars); statusHistory.push_back(std::pair("Minos", _status)); diff --git a/roofit/xroofit/src/xRooFitVersion.h b/roofit/xroofit/src/xRooFitVersion.h index b7776cc1edf35..3281d7550098b 100644 --- a/roofit/xroofit/src/xRooFitVersion.h +++ b/roofit/xroofit/src/xRooFitVersion.h @@ -12,5 +12,5 @@ #pragma once -#define GIT_COMMIT_HASH "32646f9" -#define GIT_COMMIT_DATE "2024-06-13 17:14:41 +0200" +#define GIT_COMMIT_HASH "c9465df" +#define GIT_COMMIT_DATE "2024-11-28 11:49:25 +0000" diff --git a/roofit/xroofit/src/xRooHypoSpace.cxx b/roofit/xroofit/src/xRooHypoSpace.cxx index b308978e14529..76fe6f8c6010a 100644 --- a/roofit/xroofit/src/xRooHypoSpace.cxx +++ b/roofit/xroofit/src/xRooHypoSpace.cxx @@ -185,6 +185,17 @@ int xRooNLLVar::xRooHypoSpace::AddPoints(const char *parName, size_t nPoints, do throw std::runtime_error("Unknown parameter"); _par->setAttribute("axis"); + if (low < _par->getMin()) { + Warning("AddPoints", "low edge of hypoSpace %g below lower bound of parameter: %g. Changing to lower bound", low, + _par->getMin()); + low = _par->getMin(); + } + if (high > _par->getMax()) { + Warning("AddPoints", "high edge of hypoSpace %g above upper bound of parameter: %g. Changing to upper bound", + high, _par->getMax()); + high = _par->getMax(); + } + if (nPoints == 1) { _par->setVal((high + low) * 0.5); AddPoint(); diff --git a/roofit/xroofit/src/xRooNLLVar.cxx b/roofit/xroofit/src/xRooNLLVar.cxx index 8225814158d11..c08488f9d84d8 100644 --- a/roofit/xroofit/src/xRooNLLVar.cxx +++ b/roofit/xroofit/src/xRooNLLVar.cxx @@ -22,8 +22,13 @@ This xRooNLLVar object has several special methods, e.g. for fitting and toy dat #if ROOT_VERSION_CODE < ROOT_VERSION(6, 27, 00) #define protected public #endif + #include "RooFitResult.h" -#include "../../roofitcore/src/RooNLLVar.h" + +#if ROOT_VERSION_CODE < ROOT_VERSION(6, 33, 00) +#include "RooNLLVar.h" +#endif + #ifdef protected #undef protected #endif @@ -92,6 +97,7 @@ This xRooNLLVar object has several special methods, e.g. 
for fitting and toy dat #include "TROOT.h" #include "TKey.h" #include "TRegexp.h" +#include "TStopwatch.h" BEGIN_XROOFIT_NAMESPACE @@ -181,12 +187,18 @@ xRooNLLVar::xRooNLLVar(const std::shared_ptr &pdf, } } else if (strcmp(opts.At(i)->GetName(), "Hesse") == 0) { fitConfig()->SetParabErrors(dynamic_cast(opts.At(i))->getInt(0)); // controls hesse + } else if (strcmp(opts.At(i)->GetName(), "Minos") == 0) { + fitConfig()->SetMinosErrors(dynamic_cast(opts.At(i))->getInt(0)); // controls minos } else if (strcmp(opts.At(i)->GetName(), "Strategy") == 0) { fitConfig()->MinimizerOptions().SetStrategy(dynamic_cast(opts.At(i))->getInt(0)); } else if (strcmp(opts.At(i)->GetName(), "StrategySequence") == 0) { fitConfigOptions()->SetNamedValue("StrategySequence", dynamic_cast(opts.At(i))->getString(0)); } else if (strcmp(opts.At(i)->GetName(), "Tolerance") == 0) { - fitConfig()->MinimizerOptions().SetTolerance(dynamic_cast(opts.At(i))->getInt(0)); + fitConfig()->MinimizerOptions().SetTolerance(dynamic_cast(opts.At(i))->getDouble(0)); + } else if (strcmp(opts.At(i)->GetName(), "MaxCalls") == 0) { + fitConfig()->MinimizerOptions().SetMaxFunctionCalls(dynamic_cast(opts.At(i))->getInt(0)); + } else if (strcmp(opts.At(i)->GetName(), "MaxIterations") == 0) { + fitConfig()->MinimizerOptions().SetMaxIterations(dynamic_cast(opts.At(i))->getInt(0)); } else if (strcmp(opts.At(i)->GetName(), "PrintLevel") == 0) { fitConfig()->MinimizerOptions().SetPrintLevel(dynamic_cast(opts.At(i))->getInt(0)); } else { @@ -390,6 +402,44 @@ void xRooNLLVar::reinitialize() } } } + std::map normRanges; + if (auto range = dynamic_cast(fOpts->find("RangeWithName"))) { + TString rangeName = range->getString(0); + if (auto sr = dynamic_cast(fOpts->find("SplitRange")); + sr && sr->getInt(0) && dynamic_cast(fPdf.get())) { + // doing split range ... need to loop over categories of simpdf and apply range to each + auto simPdf = dynamic_cast(fPdf.get()); + for (auto cat : simPdf->indexCat()) { + auto subpdf = simPdf->getPdf(cat.first.c_str()); + if (!subpdf) + continue; // state not in pdf + TString srangeName(rangeName); + srangeName.ReplaceAll(",", "_" + cat.first + ","); + srangeName += "_" + cat.first; + RooArgSet ss; + subpdf->treeNodeServerList(&ss, nullptr, true, false); + ss.add(*subpdf); + for (auto a : ss) { + if (a->InheritsFrom("RooAddPdf")) { + auto p = dynamic_cast(a); + normRanges[p] = p->normRange() ? p->normRange() : ""; + p->setNormRange(srangeName); + } + } + } + } else { + // set range on all AddPdfs before creating - needed in cases where coefs are present and need fractioning + // based on fit range bugfix needed: roofit needs to propagate the normRange to AddPdfs child nodes (used in + // createExpectedEventsFunc) + for (auto a : s) { + if (a->InheritsFrom("RooAddPdf")) { + auto p = dynamic_cast(a); + normRanges[p] = p->normRange() ? p->normRange() : ""; + p->setNormRange(rangeName); + } + } + } + } // before creating, clear away caches if any if pdf is in ws if (GETWS(fPdf)) { std::set setNames; @@ -411,6 +461,9 @@ void xRooNLLVar::reinitialize() // so swap those in ... question: is recursiveRedirectServers usage in RooAbsOptTestStatic (and here) a memory // leak?? where do the replaced servers get deleted?? + for (auto &[k, v] : normRanges) + k->setNormRange(v == "" ? 
nullptr : v.c_str()); + for (auto &a : attribs) std::shared_ptr::get()->setAttribute(a.c_str()); // create parent on next line to avoid triggering workspace initialization code in constructor of xRooNode @@ -451,6 +504,8 @@ xRooNLLVar::generate(bool expected, int seed) return xRooFit::generateFrom(*fPdf, *fr, expected, seed); } +xRooNLLVar::xRooFitResult::xRooFitResult(const RooFitResult &fr) : xRooFitResult(std::make_shared(fr)) {} + xRooNLLVar::xRooFitResult::xRooFitResult(const std::shared_ptr &in, const std::shared_ptr &nll) : std::shared_ptr(std::dynamic_pointer_cast(in->fComp)), fNode(in), @@ -774,7 +829,7 @@ double xRooNLLVar::getEntryBinWidth(size_t entry) const return volume; } -double xRooNLLVar::saturatedConstraintTerm() const +double xRooNLLVar::saturatedConstraintTermVal() const { // for each global observable in the dataset, determine which constraint term is associated to it // and given its type, add the necessary saturated term... @@ -838,23 +893,31 @@ double xRooNLLVar::mainTermNdof() const return data()->numEntries() - _floats->size(); } -double xRooNLLVar::mainTermPgof() const +double xRooNLLVar::mainTermVal() const { // using totVal - constraintTerm while new evalbackend causes mainTerm() to return nullptr - double val = get()->getVal(); + return get()->getVal() - constraintTermVal(); +} + +double xRooNLLVar::constraintTermVal() const +{ if (auto _constraintTerm = constraintTerm()) { - val -= _constraintTerm->getVal(); + return _constraintTerm->getVal(); } + return 0; +} - return TMath::Prob(2. * (val - saturatedMainTerm()), mainTermNdof()); +double xRooNLLVar::mainTermPgof() const +{ + return TMath::Prob(2. * (mainTermVal() - saturatedMainTermVal()), mainTermNdof()); } double xRooNLLVar::saturatedVal() const { - return saturatedMainTerm() + saturatedConstraintTerm(); + return saturatedMainTermVal() + saturatedConstraintTermVal(); } -double xRooNLLVar::saturatedMainTerm() const +double xRooNLLVar::saturatedMainTermVal() const { // Use this term to create a goodness-of-fit metric, which is approx chi2 distributed with numEntries (data) d.o.f: @@ -870,7 +933,7 @@ double xRooNLLVar::saturatedMainTerm() const std::set _binnedChannels = binnedChannels(); - // for binned case each entry is: -(-N + Nlog(N) - std::lgamma(N+1)) + // for binned case each entry is: -(-N + Nlog(N) - TMath::LnGamma(N+1)) // for unbinned case each entry is: -(N*log(N/(sumN*binW))) = -N*logN + N*log(sumN) + N*log(binW) // but unbinned gets extendedTerm = sumN - sumN*log(sumN) // so resulting sum is just sumN - sum[ N*logN - N*log(binW) ] @@ -884,15 +947,17 @@ double xRooNLLVar::saturatedMainTerm() const for (int i = 0; i < _data->numEntries(); i++) { _data->get(i); double w = _data->weight(); + if (w == 0) + continue; out -= w * std::log(w); if (_binnedChannels.count("*")) { - out += std::lgamma(w + 1); + out += TMath::LnGamma(w + 1); } else if (_binnedChannels.empty()) { out += w * std::log(getEntryBinWidth(i)); } else if (cat) { // need to determine which channel we are in for this entry to decide if binned or unbinned active if (_binnedChannels.count(_data->get()->getCatLabel(cat->GetName()))) { - out += std::lgamma(w + 1); + out += TMath::LnGamma(w + 1); } else { out += w * std::log(getEntryBinWidth(i)); } @@ -901,7 +966,7 @@ double xRooNLLVar::saturatedMainTerm() const } } - out += simTerm(); + out += simTermVal(); return out; } @@ -1143,7 +1208,10 @@ bool xRooNLLVar::setData(const std::pair, std::share } try { - if (!kReuseNLL || !mainTerm() || mainTerm()->operMode() == 
RooAbsTestStatistic::MPMaster) { + if (!kReuseNLL /*|| !mainTerm()*/ + /*|| mainTerm()->operMode() == RooAbsTestStatistic::MPMaster*/) { // lost access to RooAbsTestStatistic + // in 6.34, but MP-mode will still throw + // exception, so we will still catch it // happens when using MP need to rebuild the nll instead // also happens if there's no mainTerm(), which is the case in 6.32 where RooNLLVar is partially deprecated AutoRestorer snap(*fFuncVars); @@ -1158,13 +1226,18 @@ bool xRooNLLVar::setData(const std::pair, std::share } bool out = false; if (_data.first) { - if (_data.first->getGlobalObservables()) { - // replace in all terms - get()->setData(*_data.first, false); - } else { - // replace just in mainTerm ... note to self: why not just replace in all like above? should test! - out = mainTerm()->setData(*_data.first, false /* clone data? */); - } + // replace in all terms + out = get()->setData(*_data.first, false /* clone data */); + // get()->setValueDirty(); + // if (_data.first->getGlobalObservables()) { + // // replace in all terms + // out = get()->setData(*_data.first, false); + // get()->setValueDirty(); + // } else { + // // replace just in mainTerm ... note to self: why not just replace in all like above? should + // test! auto _mainTerm = mainTerm(); out = _mainTerm->setData(*_data.first, false /* clone data? + // */); _mainTerm->setValueDirty(); + // } } else { reset(); } @@ -1222,34 +1295,63 @@ void xRooNLLVar::AddOption(const RooCmdArg &opt) RooAbsData *xRooNLLVar::data() const { + return fData.get(); + /* +#if ROOT_VERSION_CODE < ROOT_VERSION(6, 33, 00) auto _nll = mainTerm(); if (!_nll) return fData.get(); - RooAbsData *out = &_nll->data(); - if (!out) - return fData.get(); - return out; + RooAbsData *out = &static_cast(_nll)->data(); +#else + RooAbsData* out = nullptr; // new backends not conducive to having a reference to a RooAbsData in them (they use +buffers instead) #endif if (!out) return fData.get(); return out; + */ } -RooNLLVar *xRooNLLVar::mainTerm() const +/* +RooAbsReal *xRooNLLVar::mainTerm() const { - auto _func = func(); - if (auto a = dynamic_cast(_func.get()); a) - return a; + return nullptr; + // the main term is the "other term" in a RooAddition alongside a ConstraintSum + // if can't find the ConstraintSum, just return the function + + RooAbsArg* _func = func().get(); + if(!_func->InheritsFrom("RooAddition")) { + _func = nullptr; + // happens with new 6.32 backend, where the top-level function is an EvaluatorWrapper + for (auto s : func()->servers()) { + if(s->InheritsFrom("RooAddition")) { + _func = s; break; + } + } + if(!_func) { + return func().get(); + } + } + std::set others,constraints; for (auto s : _func->servers()) { - if (auto a = dynamic_cast(s); a) - return a; + if(s->InheritsFrom("RooConstraintSum")) { + constraints.insert(s); + } else { + others.insert(s); + } } - return nullptr; + if(constraints.size()==1 && others.size()==1) { + return static_cast(*others.begin()); + } + return nullptr; // failed to find the right term? 
+ + } + */ -double xRooNLLVar::extendedTerm() const +double xRooNLLVar::extendedTermVal() const { // returns Nexp - Nobs*log(Nexp) return fPdf->extendedTerm(fData->sumEntries(), fData->get()); } -double xRooNLLVar::simTerm() const +double xRooNLLVar::simTermVal() const { if (auto s = dynamic_cast(fPdf.get()); s) { return fData->sumEntries() * log(1.0 * (s->servers().size() - 1)); // one of the servers is the cat @@ -1257,7 +1359,7 @@ double xRooNLLVar::simTerm() const return 0; } -double xRooNLLVar::binnedDataTerm() const +double xRooNLLVar::binnedDataTermVal() const { // this is only relevant if BinnedLikelihood active // = sum[ N_i! ] since LnGamma(N_i+1) ~= N_i! @@ -1266,7 +1368,7 @@ double xRooNLLVar::binnedDataTerm() const double out = 0; for (int i = 0; i < fData->numEntries(); i++) { fData->get(i); - out += std::lgamma(fData->weight() + 1) - fData->weight() * std::log(getEntryBinWidth(i)); + out += TMath::LnGamma(fData->weight() + 1) - fData->weight() * std::log(getEntryBinWidth(i)); } return out; @@ -1421,29 +1523,60 @@ int xRooNLLVar::xRooHypoPoint::status() const void xRooNLLVar::xRooHypoPoint::Print(Option_t *) const { - std::cout << "POI: " << const_cast(this)->poi().contentsString() - << " , null: " << dynamic_cast(const_cast(this)->poi().first())->getVal() - << " , alt: " - << dynamic_cast(const_cast(this)->alt_poi().first())->getVal(); + auto _poi = const_cast(this)->poi(); + auto _alt_poi = const_cast(this)->alt_poi(); + std::cout << "POI: " << _poi.contentsString() << " , null: "; + bool first = true; + for (auto a : _poi) { + auto v = dynamic_cast(a); + if (!a) + continue; + if (!first) + std::cout << ","; + std::cout << v->getVal(); + first = false; + } + std::cout << " , alt: "; + first = true; + bool any_alt = false; + for (auto a : _alt_poi) { + auto v = dynamic_cast(a); + if (!a) + continue; + if (!first) + std::cout << ","; + std::cout << v->getVal(); + first = false; + if (!std::isnan(v->getVal())) + any_alt = true; + } std::cout << " , pllType: " << fPllType << std::endl; std::cout << " - ufit: "; if (fUfit) { - std::cout << fUfit->GetName() << " " << fUfit->minNll() << " (status=" << fUfit->status() << ") (" - << const_cast(this)->mu_hat().GetName() - << "_hat: " << const_cast(this)->mu_hat().getVal() << " +/- " - << const_cast(this)->mu_hat().getError() << ")" << std::endl; + std::cout << fUfit->GetName() << " " << fUfit->minNll() << " (status=" << fUfit->status() << ") ("; + first = true; + for (auto a : _poi) { + auto v = dynamic_cast(fUfit->floatParsFinal().find(a->GetName())); + if (!v) + continue; + if (!first) + std::cout << ","; + std::cout << v->GetName() << "_hat: " << v->getVal() << " +/- " << v->getError(); + first = false; + } + std::cout << ")" << std::endl; } else { std::cout << "Not calculated" << std::endl; } - std::cout << " - null cfit: "; + std::cout << " - cfit_null: "; if (fNull_cfit) { std::cout << fNull_cfit->GetName() << " " << fNull_cfit->minNll() << " (status=" << fNull_cfit->status() << ")"; } else { std::cout << "Not calculated"; } - if (!std::isnan(dynamic_cast(const_cast(this)->alt_poi().first())->getVal())) { - std::cout << std::endl << " - alt cfit: "; + if (any_alt) { + std::cout << std::endl << " - cfit_alt: "; if (fAlt_cfit) { std::cout << fAlt_cfit->GetName() << " " << fAlt_cfit->minNll() << " (status=" << fAlt_cfit->status() << ")" << std::endl; @@ -1467,7 +1600,7 @@ void xRooNLLVar::xRooHypoPoint::Print(Option_t *) const } else { std::cout << "Not calculated"; } - std::cout << std::endl << " - asimov null cfit: "; + 
std::cout << std::endl << " - asimov cfit_null: "; if (fAsimov->fNull_cfit) { std::cout << fAsimov->fNull_cfit->GetName() << " " << fAsimov->fNull_cfit->minNll() << " (status=" << fAsimov->fNull_cfit->status() << ")"; @@ -1479,8 +1612,12 @@ void xRooNLLVar::xRooHypoPoint::Print(Option_t *) const } else { std::cout << std::endl; } + if (fLbound_cfit) { + std::cout << " - cfit_lbound: " << fLbound_cfit->GetName() << " " << fLbound_cfit->minNll() + << " (status=" << fLbound_cfit->status() << ")"; + } if (fGenFit) - std::cout << " - genFit: " << fGenFit->GetName() << std::endl; + std::cout << " - gfit: " << fGenFit->GetName() << std::endl; if (!nullToys.empty() || !altToys.empty()) { std::cout << " * null toys: " << nullToys.size(); size_t firstToy = 0; @@ -2942,7 +3079,8 @@ RooStats::HypoTestResult xRooNLLVar::xRooHypoPoint::result() fitDetails.addClone(RooRealVar("minNll", "minNll", 0)); fitDetails.addClone(RooRealVar("edm", "edm", 0)); auto fitDS = new RooDataSet("fits", "fit summary data", fitDetails); - fitDS->convertToTreeStore(); // strings not stored properly in vector store, so do convert! + // fitDS->convertToTreeStore(); // strings not stored properly in vector store, so do convert! - not needed since + // string var storage not properly supported - storing in globs list instead for (int i = 0; i < 7; i++) { std::shared_ptr fit; diff --git a/roofit/xroofit/src/xRooNode.cxx b/roofit/xroofit/src/xRooNode.cxx index 8a6901eb449a1..b40ba0e35feee 100644 --- a/roofit/xroofit/src/xRooNode.cxx +++ b/roofit/xroofit/src/xRooNode.cxx @@ -332,6 +332,14 @@ xRooNode::xRooNode(const char *name, const std::shared_ptr &comp, const } } + // load list of colors if there is one + if (auto colors = dynamic_cast(_ws->obj(gROOT->GetListOfColors()->GetName()))) { + gROOT->GetListOfColors()->Clear(); + for (auto col : *colors) { + gROOT->GetListOfColors()->Add(col); + } + } + // use the datasets if any to 'mark' observables int checkCount = 0; for (auto &d : _ws->allData()) { @@ -1212,6 +1220,13 @@ const char *xRooNode::GetIconName() const } return "xRooFitPDFStyle"; } + if (o->InheritsFrom("RooStats::ModelConfig")) { + if (!gClient->GetMimeTypeList()->GetIcon("xRooFitMCStyle", true)) { + gClient->GetMimeTypeList()->AddType("xRooFitMCStyle", "xRooFitMCStyle", "app_t.xpm", "app_t.xpm", + "->Browse()"); + } + return "xRooFitMCStyle"; + } if (auto a = dynamic_cast(o); a) { if (auto _ax = GetXaxis(); _ax && (a->isBinnedDistribution(*dynamic_cast(_ax->GetParent())) || @@ -1446,7 +1461,46 @@ xRooNode xRooNode::Remove(const xRooNode &child) throw std::runtime_error(TString::Format("Cannot find %s in %s", child.GetName(), fParent->GetName())); } return xRooNode(*arg); - } // todo: add support for RooAddPdf and RooAddition + } else if (auto p5 = fParent->get(); p5) { + auto arg = toRemove.get(); + if (!arg) + arg = p5->pdfList().find(child.GetName()); + if (!arg) + throw std::runtime_error(TString::Format("Cannot find %s in %s", child.GetName(), fParent->GetName())); + // remove, including coef removal .... + auto idx = p5->pdfList().index(arg); + + if (idx != -1) { + + const_cast(p5->pdfList()).remove(*arg); + p5->removeServer(*arg, true); + // have to be careful removing coef because if shared will end up removing them all!! 
+ std::vector _coefs; + for (size_t ii = 0; ii < const_cast(p5->coefList()).size(); ii++) { + if (ii != size_t(idx)) + _coefs.push_back(const_cast(p5->coefList()).at(ii)); + } + const_cast(p5->coefList()).removeAll(); + for (auto &a : _coefs) + const_cast(p5->coefList()).add(*a); + + sterilize(); + } else { + throw std::runtime_error(TString::Format("Cannot find %s in %s", child.GetName(), fParent->GetName())); + } + return xRooNode(*arg); + } else if (auto p6 = fParent->get(); p6) { + auto arg = toRemove.get(); + if (!arg) + arg = p6->list().find(child.GetName()); + if (!arg) + throw std::runtime_error(TString::Format("Cannot find %s in %s", child.GetName(), fParent->GetName())); + // remove server ... doesn't seem to trigger removal from proxy + const_cast(p6->list()).remove(*arg); + p6->removeServer(*arg, true); + sterilize(); + return xRooNode(*arg); + } } if (auto w = get(); w) { @@ -1468,11 +1522,12 @@ xRooNode xRooNode::Remove(const xRooNode &child) return out; } else if (get() || get()) { return factors().Remove(child); - } else if (get()) { + } else if (get() || get() || get()) { return components().Remove(child); } - throw std::runtime_error("Removal not implemented for this type of object"); + throw std::runtime_error("Removal not implemented for object type " + + std::string(get() ? get()->ClassName() : "null")); } xRooNode xRooNode::Add(const xRooNode &child, Option_t *opt) @@ -1531,6 +1586,11 @@ xRooNode xRooNode::Add(const xRooNode &child, Option_t *opt) auto out = (child.get()) ? child.get() : getObject(child.GetName()).get(); out->setAttribute("poi"); return xRooNode(*out, *this); + } else if (!child.get() && fParent->get()) { + // may be creating poi at same time as adding, try add to parent + auto res = fParent->Add(child); + if (res.get()) + return Add(res); } throw std::runtime_error("Failed to add parameter of interest"); } else if ((strcmp(GetName(), ".pars") == 0 || strcmp(GetName(), ".vars") == 0) && fParent->get()) { @@ -1796,8 +1856,23 @@ xRooNode xRooNode::Add(const xRooNode &child, Option_t *opt) TString::Format("Expected Events of %s", _pdf->GetTitle()), *_pdf)); } else { + + // need to create a coefficient for each existing pdf first, like above + for (auto i = p->coefList().size(); i < p->pdfList().size(); i++) { + const_cast(p->coefList()) + .add(*acquireNew( + TString::Format("%s_extBind", p->pdfList().at(i)->GetName()), + TString::Format("Expected Events of %s", p->pdfList().at(i)->GetTitle()), + *static_cast(p->pdfList().at(i)))); + } + const_cast(p->coefList()).add(*acquire2("1", "1", 1)); } + // ensure not in no-coef mode any more + *reinterpret_cast(reinterpret_cast(p) + + p->Class()->GetDataMemberOffset("_allExtendable")) = false; + *reinterpret_cast(reinterpret_cast(p) + + p->Class()->GetDataMemberOffset("_haveLastCoef")) = true; } const_cast(p->pdfList()).add(*_pdf); sterilize(); @@ -1837,7 +1912,7 @@ xRooNode xRooNode::Add(const xRooNode &child, Option_t *opt) if (child.get()) { out = acquire(child.fComp); if (std::dynamic_pointer_cast(cc) && !TString(cc->GetOption()).Contains("nostyle")) { - xRooNode(out, *this).style(cc.get()); // transfer style if adding a histogram + xRooNode(out, *this).styles(cc.get()); // transfer style if adding a histogram } } if (!child.fComp && getObject(child.GetName())) { @@ -2097,11 +2172,21 @@ xRooNode xRooNode::Add(const xRooNode &child, Option_t *opt) // if child is a histogram, will create a RooProdPdf } else if (auto w = get(); w) { - child.convertForAcquisition(*this); + child.convertForAcquisition( + *this, 
child.get() ? "" : "func" /* if child is a string, allow it to be passed to factory */); if (child.get()) { if (auto _d = child.get()) { - // don't use acquire method to import, because that adds datasets as Embedded + // don't use acquire method to import, because that adds datasets as Embeddded if (!w->import(*_d)) { + // should upgrade vars with any obs from the dataset + if (_d->get()) { + std::unique_ptr(w->allVars().selectCommon(*_d->get()))->setAttribAll("obs"); + } + if (_d->getGlobalObservables()) { + std::unique_ptr globs(w->allVars().selectCommon(*_d->get())); + globs->setAttribAll("obs"); + globs->setAttribAll("global"); + } return xRooNode(child.GetName(), *w->data(child.GetName()), *this); } else { throw std::runtime_error( @@ -2127,7 +2212,7 @@ xRooNode xRooNode::Add(const xRooNode &child, Option_t *opt) auto _cat = acquire(catName.c_str(), catName.c_str()); _cat->setAttribute("obs"); auto out = acquireNew(child.GetName(), child.GetTitle(), *_cat); - Info("Add", "Created model RooSimultaneous::%s in workspace %s", out->GetName(), w->GetName()); + Info("Add", "Created pdf RooSimultaneous::%s in workspace %s", out->GetName(), w->GetName()); return xRooNode(out, *this); } } @@ -2592,7 +2677,25 @@ xRooNode xRooNode::Constrain(const xRooNode &child) if (!x) { throw std::runtime_error("Nowhere to put constraint"); } - + // get datasets of the swallower, and add glob to any globs lists + auto childGlobs = child.globs(); + if (!childGlobs.empty()) { + for (auto d : x->datasets()) { + if (auto globs = d->get()->getGlobalObservables()) { + RooArgSet newGlobs(*globs); + newGlobs.add(*childGlobs.get()); + d->get()->setGlobalObservables(newGlobs); + } + } + // also add to the workspaces globalObservables lists + if (x->ws()) { + for (auto &[k, v] : GETWSSETS(x->ws())) { + if (k == "globalObservables" || TString(k).EndsWith("_GlobalObservables")) { + const_cast(v).add(*childGlobs.get()); + } + } + } + } if (auto s = x->get(); s) { // put into every channel that features parameter x->browse(); @@ -2821,6 +2924,22 @@ xRooNode xRooNode::Multiply(const xRooNode &child, Option_t *opt) if (strcmp(GetName(), ".coef") == 0) { // covers both .coef and .coefs // need to add this into the relevant coef ... if its not a RooProduct, replace it with one first if (auto p = fParent->fParent->get()) { + // may be in no-coef mode ... 
in which case must create coefs (use "ExtendedBindings" but note that these need + // obs list passing to them + if (p->coefList().empty() && !p->pdfList().empty()) { + for (auto _pdf : p->pdfList()) { + const_cast(p->coefList()) + .add(*acquireNew(TString::Format("%s_extBind", _pdf->GetName()), + TString::Format("Expected Events of %s", _pdf->GetTitle()), + *static_cast(_pdf))); + } + Info("Multiply", "Created RooExtendedBinding coefficients for all pdfs of %s so that can multiply coef", + p->GetName()); + *reinterpret_cast(reinterpret_cast(p) + + p->Class()->GetDataMemberOffset("_allExtendable")) = false; + *reinterpret_cast(reinterpret_cast(p) + + p->Class()->GetDataMemberOffset("_haveLastCoef")) = true; + } for (size_t i = 0; i < p->pdfList().size(); i++) { if (p->pdfList().at(i) == fParent->get()) { auto coefs = p->coefList().at(i); @@ -2942,11 +3061,11 @@ xRooNode xRooNode::Multiply(const xRooNode &child, Option_t *opt) // need to create or hide inside a sumpdf or rooadpdf std::shared_ptr _pdf; if (!child.get() && strcmp(child.GetName(), "components") == 0) { - auto _sumpdf = acquireNew(Form("%s_%s", p2->GetName(), child.GetName()), - (strlen(child.GetTitle()) && strcmp(child.GetTitle(), child.GetName())) - ? child.GetTitle() - : p2->GetTitle(), - RooArgList(), RooArgList()); + auto _sumpdf = acquireNew( + Form("%s_%s", p2->GetName(), child.GetName()), + (strlen(child.GetTitle()) && strcmp(child.GetTitle(), child.GetName())) ? child.GetTitle() + : p2->GetTitle(), + RooArgList() /*, RooArgList() forces coef-mode if we specify this list */); _pdf = _sumpdf; } else { auto _sumpdf = acquireNew( @@ -4541,6 +4660,9 @@ std::shared_ptr xRooNode::convertForAcquisition(xRooNode &acquirer, con TString s(sName); s = TString(s(8, s.Length())); fComp.reset(acquirer.ws()->factory(s), [](TObject *) {}); + if (fComp) { + const_cast(this)->TNamed::SetName(fComp->GetName()); + } return fComp; } @@ -4548,6 +4670,11 @@ std::shared_ptr xRooNode::convertForAcquisition(xRooNode &acquirer, con } std::shared_ptr xRooNode::style(TObject *initObject, bool autoCreate) const +{ + return std::dynamic_pointer_cast(styles(initObject, autoCreate).fComp); +} + +xRooNode xRooNode::styles(TObject *initObject, bool autoCreate) const { TString t = GetTitle(); @@ -4601,7 +4728,7 @@ std::shared_ptr xRooNode::style(TObject *initObject, bool autoCreate) co arg->setStringAttribute("style", style->GetName()); } - return style; + return xRooNode(style, *this); } std::shared_ptr xRooNode::acquire(const std::shared_ptr &arg, bool checkFactory, bool mustBeNew) @@ -4835,17 +4962,21 @@ std::shared_ptr xRooNode::find(const std::string &name, bool browseRes } return child; } - if (auto x = mainChild(); x && strcmp(child->GetName(), x.GetName()) == 0) { - // can browse directly into main children as if their children were our children - for (auto &child2 : x.browse()) { - if (auto _obj = child2->get(); name == child2->GetName() || partname == child2->GetName() || - (_obj && name == _obj->GetName()) || (_obj && partname == _obj->GetName())) { - if (browseResult) - child2->browse(); // needed for onward read (or is it? there's a browse above too??) - if (partname != name && name != child2->GetName()) { - return child2->at(name.substr(partname.length() + 1)); + if (partname.find('.') != 0) { // do not allow mainChild browsing if trying to find a "." child ... 
as is done in + // getObject for ".memory" + if (auto x = mainChild(); x && strcmp(child->GetName(), x.GetName()) == 0) { + // can browse directly into main children as if their children were our children + for (auto &child2 : x.browse()) { + if (auto _obj = child2->get(); name == child2->GetName() || partname == child2->GetName() || + (_obj && name == _obj->GetName()) || + (_obj && partname == _obj->GetName())) { + if (browseResult) + child2->browse(); // needed for onward read (or is it? there's a browse above too??) + if (partname != name && name != child2->GetName()) { + return child2->at(name.substr(partname.length() + 1)); + } + return child2; } - return child2; } } } @@ -4858,6 +4989,15 @@ std::shared_ptr xRooNode::find(const std::string &name, bool browseRes } return child2; } + // allow calling of find on a RooWorkspace to access getObject objects ... + if (get() && name != ".memory") { + if (auto obj = getObject(name)) { + auto out = std::make_shared(obj, *this); + if (browseResult) + out->browse(); + return out; + } + } return nullptr; } @@ -4912,6 +5052,10 @@ std::shared_ptr xRooNode::operator[](const std::string &name) return child2; } auto out = std::make_shared(partname.c_str(), nullptr, *this); // not adding as child yeeet + // special case, if creating a node in the workspace with a specific name, it's a folder node ... + if (get() && partname == "pdfs") { + out->SetName("!pdfs"); + } if (partname != name) { return out->operator[](name.substr(partname.length() + 1)); } @@ -5103,6 +5247,14 @@ xRooNode &xRooNode::browse() existing->fTimes++; existing->fFolder = c->fFolder; // transfer folder assignment } else { + // mark any existing children with the same name for cleanup - this happens e.g. if did a Replace on one + // of these nodes note that the child nodes will still become reordered (the old node will be deleted, + // new node will appear at end) + for (auto &child : *this) { + if (strcmp(child->GetName(), c->GetName()) == 0) { + child->fTimes = 0; + } + } emplace_back(c); } } else if (auto s = dynamic_cast(_proxy)) { @@ -5616,6 +5768,8 @@ xRooNode xRooNode::components() const out.back()->fFolder = "!styles"; } else if (strcmp(out.back()->get()->ClassName(), "RooStats::HypoTestInverterResult") == 0) { out.back()->fFolder = "!scans"; + } else if (strcmp(out.back()->get()->ClassName(), "RooStats::ModelConfig") == 0) { + out.back()->fFolder = "!models"; } else { out.back()->fFolder = "!objects"; } @@ -5637,6 +5791,14 @@ xRooNode xRooNode::components() const out.emplace_back(std::make_shared(*snap, *this)); out.back()->fFolder = "!snapshots"; } + } else if (auto mc = get()) { + // add the pdf as a child, and the external constraints set if its there + if (mc->GetPdf()) { + out.emplace_back(std::make_shared(".pdf", *mc->GetPdf(), *this)); + } + if (mc->GetExternalConstraints()) { + out.emplace_back(std::make_shared(".extCons", *mc->GetExternalConstraints(), *this)); + } } else if (strlen(GetName()) > 0 && GetName()[0] == '!' && fParent) { // special case of dynamic property if (TString(GetName()) == "!.pars") { @@ -5721,9 +5883,35 @@ xRooNode xRooNode::bins() const return out; } -xRooNode xRooNode::coefs() const +xRooNode xRooNode::coefs(bool recurse) const { RooArgList coefs; + + if (recurse && fParent) { + // get our coefs and multiply it by the parents coefs ... 
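// ---------------------------------------------------------------------------
// Illustrative aside, hedged and not from the original patch: the ".recursiveCoefs"
// node built just below is simply a RooProduct of this node's own coefficient with
// the parent's recursive coefficient. A minimal sketch of that combination in plain
// RooFit (all names here are invented for illustration only):
#include "RooRealVar.h"
#include "RooProduct.h"
#include "RooArgList.h"

inline double recursiveCoefSketch()
{
   RooRealVar ourCoef("ourCoef", "local coefficient", 2.0);
   RooRealVar parentCoef("parentCoef", "parent coefficient", 0.5);
   // the coefficient seen by the top-level sum is the product of the two levels
   RooProduct recursiveCoefs("recursiveCoefs", "ourCoef*parentCoef", RooArgList(ourCoef, parentCoef));
   return recursiveCoefs.getVal(); // = 1.0 in this toy example
}
// ---------------------------------------------------------------------------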
+ auto ourCoefs = xRooNode::coefs(false); + auto parentCoefs = fParent->coefs(true); + if (!parentCoefs.get()) { + // no coefs to include, just return our coefs + return ourCoefs; + } + if (!ourCoefs.get()) { + // just return the parent's coefs + return parentCoefs; + } + // if got here, must combine parentCoefs and outCoefs into a RooProduct + xRooNode out(".recursiveCoefs", + std::make_shared(".recursiveCoefs", + TString::Format("Recursive Coefficients of %s", GetName()), + *ourCoefs.get(), *parentCoefs.get()), + *this); + // keep alive the two coef nodes by adding to out's memory + auto mem = out.emplace_back(std::make_shared(".memory", nullptr, *this)); + mem->emplace_back(std::make_shared(ourCoefs)); + mem->emplace_back(std::make_shared(parentCoefs)); + return out; + } + bool isResidual = false; // if parent is a sumpdf or addpdf then include the coefs @@ -5855,8 +6043,10 @@ xRooNode xRooNode::factors() const bool show(true); for (auto c : a->clients()) { show = false; - if (c->InheritsFrom("RooProduct")) + if (c->InheritsFrom("RooProduct")) { show = true; + break; + } } if (show) out.emplace_back(std::make_shared(*a, *this)); @@ -6087,6 +6277,8 @@ xRooNode xRooNode::datasets() const } } }*/ + } else if (auto mc = get()) { + return xRooNode(*mc->GetPdf(), fParent).datasets(); } return out; @@ -6191,7 +6383,9 @@ TGraph *xRooNode::BuildGraph(RooAbsLValue *v, bool includeZeros, TVirtualPad *fr // auto x = theData->get()->find((v) ? dynamic_cast(v)->GetName() : theHist->GetXaxis()->GetName()); // const RooAbsReal* xvar = (x) ? dynamic_cast(x) : nullptr; // const RooAbsCategory* xcat = (x && !xvar) ? dynamic_cast(x) : nullptr; - auto x = _obs.find((v) ? dynamic_cast(v)->GetName() : theHist->GetXaxis()->GetName()); + auto x = _obs.find((v) ? dynamic_cast(v)->GetName() + : (theHist->GetXaxis()->IsAlphanumeric() ? theHist->GetXaxis()->GetTimeFormatOnly() + : theHist->GetXaxis()->GetName())); if (x && x->get()->getAttribute("global")) { // is global observable ... 
dataGraph->SetPoint(0, x->get()->getVal(), 1e-15); @@ -6306,8 +6500,8 @@ TGraph *xRooNode::BuildGraph(RooAbsLValue *v, bool includeZeros, TVirtualPad *fr // gROOT->GetListOfStyles()->Add(style.get()); // } // } - auto _style = style(dataGraph); - if (_style) { + auto _styleNode = styles(dataGraph); + if (auto _style = _styleNode.get()) { *dynamic_cast(dataGraph) = *_style; *dynamic_cast(dataGraph) = *_style; *dynamic_cast(dataGraph) = *_style; @@ -6653,6 +6847,11 @@ const char *xRooNode::GetRange() const return out.c_str(); } +xRooNLLVar xRooNode::nll(const char *_data, std::initializer_list nllOpts) const +{ + return nll(xRooNode(_data), nllOpts); +} + xRooNLLVar xRooNode::nll(const xRooNode &_data) const { return nll(_data, *xRooFit::createNLLOptions()); @@ -6679,6 +6878,10 @@ xRooNLLVar xRooNode::nll(const xRooNode &_data, std::initializer_list xRooNode xRooNode::generate(const xRooNode &fr, bool expected, int seed) { + if (auto mc = get()) { + return xRooNode(*mc->GetPdf(), fParent).generate(fr, expected, seed); + } + if (!get()) { // before giving up, if this is a workspace we can proceed if we only have one model if (get()) { @@ -6756,13 +6959,31 @@ xRooNode xRooNode::generate(const xRooNode &fr, bool expected, int seed) xRooNLLVar xRooNode::nll(const xRooNode &_data, const RooLinkedList &opts) const { + if (auto mc = get()) { + if (mc->GetExternalConstraints()) { + RooLinkedList optsWithConstraints; + for (auto o : opts) { + optsWithConstraints.Add(o->Clone(nullptr)); + } + optsWithConstraints.Add(RooFit::ExternalConstraints(*mc->GetExternalConstraints()).Clone(nullptr)); + return xRooNode(*mc->GetPdf(), fParent).nll(_data, optsWithConstraints); + } else { + return xRooNode(*mc->GetPdf(), fParent).nll(_data, opts); + } + } if (!get()) { - // before giving up, if this is a workspace we can proceed if we only have one model + // before giving up, if this is a workspace we can proceed if we only have one model or pdf if (get()) { - std::shared_ptr mainModel; + std::shared_ptr mainPdf, mainModel, otherPdf; for (auto &c : const_cast(this)->browse()) { if (c->get()) { + if (!mainPdf) { + mainPdf = c; + } else { + otherPdf = c; + } + } else if (c->get()) { if (!mainModel) { mainModel = c; } else { @@ -6774,6 +6995,14 @@ xRooNLLVar xRooNode::nll(const xRooNode &_data, const RooLinkedList &opts) const } if (mainModel) return mainModel->nll(_data, opts); + if (mainPdf) { + if (otherPdf) { + throw std::runtime_error(TString::Format("Workspace has multiple pdfs, you must specify which to " + "build nll with (found at least %s and %s)", + mainPdf->GetName(), otherPdf->GetName())); + } + return mainPdf->nll(_data, opts); + } } throw std::runtime_error(TString::Format("%s is not a pdf", GetName())); } @@ -7034,6 +7263,7 @@ xRooNode xRooNode::reduced(const std::string &_range, bool invert) const } else if (!get() || get()) { // filter the children .... handle special case of filtering ".vars" with "x" option too xRooNode out(std::make_shared(), fParent); + out.SetName(TString(GetName()) + "_reduced"); size_t nobs = 0; bool notAllArgs = false; bool isVars = (strcmp(GetName(), ".vars") == 0); @@ -7159,6 +7389,139 @@ class xRooProjectedPdf : public RooProjectedPdf { } }; +double new_getPropagatedError(const RooAbsReal &f, const RooFitResult &fr, const RooArgSet &nset = {}, + RooArgList **pars = nullptr, bool asymHi = false, bool asymLo = false) +{ + // Calling getParameters() might be costly, but necessary to get the right + // parameters in the RooAbsReal. The RooFitResult only stores snapshots. 
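// ---------------------------------------------------------------------------
// Hedged aside, not from the original patch: new_getPropagatedError re-implements
// (and extends, with parameter-list reuse and asymmetric variations) what stock
// RooFit offers as RooAbsReal::getPropagatedError(fr, nset). A minimal usage sketch
// of the stock call, using an invented Gaussian model purely for illustration:
#include "RooRealVar.h"
#include "RooGaussian.h"
#include "RooDataSet.h"
#include "RooFitResult.h"
#include "RooArgSet.h"
#include "RooFit.h"
#include <memory>

inline double stockPropagationSketch()
{
   RooRealVar x("x", "x", -10, 10);
   RooRealVar mu("mu", "mu", 0, -5, 5);
   RooRealVar sigma("sigma", "sigma", 2, 0.1, 10);
   RooGaussian pdf("g", "g", x, mu, sigma);
   std::unique_ptr<RooDataSet> data{pdf.generate(RooArgSet(x), 500)};
   std::unique_ptr<RooFitResult> fr{pdf.fitTo(*data, RooFit::Save(), RooFit::PrintLevel(-1))};
   // symmetric, linearised error on the pdf value at the current x
   return pdf.getPropagatedError(*fr, RooArgSet(x));
}
// ---------------------------------------------------------------------------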
+ + // handle simple case that function is a RooRealVar + if (auto rrv = dynamic_cast(&f); rrv) { + if (auto frrrv = dynamic_cast(fr.floatParsFinal().find(*rrv)); frrrv) { + rrv = frrrv; // use value from fit result + } + if (asymHi) { + return rrv->getErrorHi(); + } else if (asymLo) { + return rrv->getErrorLo(); + } else { + return rrv->getError(); + } + } + + RooArgList *_pars = (pars) ? *pars : nullptr; + + if (!_pars) { + + RooArgSet allParamsInAbsReal; + f.getParameters(&nset, allParamsInAbsReal); + + _pars = new RooArgList; + for (auto *rrvFitRes : static_range_cast(fr.floatParsFinal())) { + + auto rrvInAbsReal = static_cast(allParamsInAbsReal.find(*rrvFitRes)); + + // Strip out parameters with zero error + if (rrvFitRes->getError() <= std::abs(rrvFitRes->getVal()) * std::numeric_limits::epsilon()) + continue; + + // Ignore parameters in the fit result that this RooAbsReal doesn't depend on + if (!rrvInAbsReal) + continue; + + // Checking for float equality is a bad. We check if the values are + // negligibly far away from each other, relative to the uncertainty. + if (std::abs(rrvInAbsReal->getVal() - rrvFitRes->getVal()) > 0.01 * rrvFitRes->getError()) { + std::stringstream errMsg; + errMsg << "RooAbsReal::getPropagatedError(): the parameters of the RooAbsReal don't have" + << " the same values as in the fit result! The logic of getPropagatedError is broken in this case."; + + throw std::runtime_error(errMsg.str()); + } + + _pars->add(*rrvInAbsReal); + } + } + + // Make std::vector of variations + TVectorD F(_pars->size()); + + // Create std::vector of plus,minus variations for each parameter + TMatrixDSym V(_pars->size() == fr.floatParsFinal().size() ? fr.covarianceMatrix() + : fr.reducedCovarianceMatrix(*_pars)); + + // TODO: if _pars includes pars not in fr, need to extend matrix with uncorrelated errors of those pars + + double nomVal = f.getVal(nset); + + for (std::size_t ivar = 0; ivar < _pars->size(); ivar++) { + + auto &rrv = static_cast((*_pars)[ivar]); + auto *frrrv = static_cast(fr.floatParsFinal().find(rrv)); + + double cenVal = rrv.getVal(); + double plusVar, minusVar, errVal; + + if (asymHi || asymLo) { + errVal = frrrv->getErrorHi(); + rrv.setVal(cenVal + errVal); + plusVar = f.getVal(nset); + errVal = frrrv->getErrorLo(); + rrv.setVal(cenVal + errVal); + minusVar = f.getVal(nset); + if (asymHi) { + // pick the one that moved result 'up' most + plusVar = std::max(plusVar, minusVar); + minusVar = 2 * nomVal - plusVar; // symmetrizes + } else { + // pick the one that moved result 'down' most + minusVar = std::min(plusVar, minusVar); + plusVar = 2 * nomVal - minusVar; // symmetrizes + } + } else { + errVal = sqrt(V(ivar, ivar)); + // Make Plus variation + rrv.setVal(cenVal + errVal); + plusVar = f.getVal(nset); + // Make Minus variation + rrv.setVal(cenVal - errVal); + minusVar = f.getVal(nset); + } + F[ivar] = (plusVar - minusVar) * 0.5; + rrv.setVal(cenVal); + } + + // Re-evaluate this RooAbsReal with the central parameters just to be + // extra-safe that a call to `getPropagatedError()` doesn't change any state. + // It should not be necessary because thanks to the dirty flag propagation + // the RooAbsReal is re-evaluated anyway the next time getVal() is called. + // Still there are imaginable corner cases where it would not be triggered, + // for example if the user changes the RooFit operation more after the error + // propagation. 
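// ---------------------------------------------------------------------------
// Hedged note, not from the original patch: the remainder of this function evaluates
// the usual linearised propagation  sigma_f = sqrt( F^T * C * F ),  where
//   F_i  = ( f(p_i + e_i) - f(p_i - e_i) ) / 2   (half-difference under +/-1 sigma of parameter i),
//   C_ij = V_ij / sqrt(V_ii * V_jj)              (correlation matrix built from covariance V).
// The quadratic form itself is a one-liner with ROOT's linear algebra classes:
#include "TVectorD.h"
#include "TMatrixDSym.h"
#include "TMath.h"

inline double linearisedError(const TVectorD &F, const TMatrixDSym &C)
{
   return TMath::Sqrt(F * (C * F)); // F^T C F, exactly the sum computed at the end of this function
}
// ---------------------------------------------------------------------------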
+ f.getVal(nset); + + TMatrixDSym C(_pars->size()); + std::vector errVec(_pars->size()); + for (std::size_t i = 0; i < _pars->size(); i++) { + errVec[i] = std::sqrt(V(i, i)); + for (std::size_t j = i; j < _pars->size(); j++) { + C(i, j) = V(i, j) / std::sqrt(V(i, i) * V(j, j)); + C(j, i) = C(i, j); + } + } + + // Calculate error in linear approximation from variations and correlation coefficient + double sum = F * (C * F); + + if (!pars) { + delete _pars; + } else { + *pars = _pars; + } + + return sqrt(sum); +} + class PdfWrapper : public RooAbsPdf { public: // need expPdf option while RooProjectedPdf doesn't support keeping things extended @@ -7501,7 +7864,8 @@ void xRooNode::sterilize() const } // observables not in the axisVars are automatically projected over -xRooNode xRooNode::histo(const xRooNode &vars, const xRooNode &fr, bool content, bool errors) const +xRooNode xRooNode::histo(const xRooNode &vars, const xRooNode &fr, bool content, bool errors, bool stack, bool errorsHi, + bool errorsLo, int nErrorToys) const { if (!vars.fComp && strlen(vars.GetName())) { @@ -7512,235 +7876,17 @@ xRooNode xRooNode::histo(const xRooNode &vars, const xRooNode &fr, bool content, RooAbsLValue *v = nullptr; if (vars.empty()) { - out.fComp = std::shared_ptr(BuildHistogram(nullptr, !content, errors, -1, -1, fr)); + // does an integral + out.fComp = std::shared_ptr( + BuildHistogram(nullptr, !content, errors, -1, -1, fr, errorsHi, errorsLo, nErrorToys, nullptr, !stack, false)); } else if (vars.size() == 1) { v = vars.at(0)->get(); - out.fComp = std::shared_ptr(BuildHistogram(v, !content, errors, 1, 0, fr)); + out.fComp = std::shared_ptr( + BuildHistogram(v, !content, errors, 1, 0, fr, errorsHi, errorsLo, nErrorToys, nullptr, !stack, true)); } else { throw std::runtime_error("multi-dim histo not yet supported"); } - if (auto h = out.get()) { - if (h->GetXaxis()->IsAlphanumeric()) { - // do this to get bin labels - h->GetXaxis()->SetName("xaxis"); // WARNING -- this messes up anywhere we GetXaxis()->GetName() - } - h->SetStats(false); - h->SetName(GetName()); - auto hCopy = static_cast(h->Clone("nominal")); - - if (content && !components().empty()) { - RooAbsReal *sf = nullptr; // TODO - support case of RooExtendPdf drawing (see ::Draw) - // build a stack - THStack *stack = new THStack("stack", ""); - int count = 2; - std::map colorByTitle; // TODO: should fill from any existing legend - std::set allTitles; - bool titleMatchName = true; - std::map histGroups; - std::vector hhs; - - // support for CMS model case where has single component containing many coeffs - // will build stack by setting each coeff equal to 0 in turn, rebuilding the histogram - // the difference from the "full" histogram will be the component - RooArgList cms_coefs; - if (!components().empty()) { - auto comps = components()[0]; - for (auto &c : *comps) { - if (c->fFolder == "!.coeffs") - cms_coefs.add(*c->get()); - } - } - - if (!cms_coefs.empty()) { - RooRealVar zero("zero", "", 0); - std::shared_ptr prevHist(static_cast(h->Clone())); - for (auto c : cms_coefs) { - // seems I have to remake the function each time, as haven't figured out what cache needs clearing? 
- std::unique_ptr f(dynamic_cast(components()[0]->get()->Clone("tmpCopy"))); - zero.setAttribute( - Form("ORIGNAME:%s", c->GetName())); // used in redirectServers to say what this replaces - f->redirectServers(RooArgSet(zero), false, true); // each time will replace one additional coef - // zero.setAttribute(Form("ORIGNAME:%s",c->GetName()),false); (commented out so that on next iteration - // will still replace all prev) - auto hh = xRooNode(*f, *this).BuildHistogram(v, false, false, !v ? -1 : 1, !v ? -1 : 0, fr); - if (sf) - hh->Scale(sf->getVal()); - if (strlen(hh->GetTitle()) == 0) - hh->SetTitle(c->GetName()); // ensure all hists has titles - titleMatchName &= (TString(c->GetName()) == hh->GetTitle() || - TString(hh->GetTitle()).BeginsWith(TString(c->GetName()) + "_")); - std::shared_ptr nextHist(static_cast(hh->Clone())); - hh->Add(prevHist.get(), -1.); - hh->Scale(-1.); - hhs.push_back(hh); - prevHist = nextHist; - } - } else { - for (auto &samp : components()) { - auto hh = samp->BuildHistogram(v, false, false, !v ? -1 : 1, !v ? -1 : 0, fr); - if (sf) - hh->Scale(sf->getVal()); - hhs.push_back(hh); - if (strlen(hh->GetTitle()) == 0) - hh->SetTitle(samp->GetName()); // ensure all hists has titles - titleMatchName &= (TString(samp->GetName()) == hh->GetTitle() || - TString(hh->GetTitle()).BeginsWith(TString(samp->GetName()) + "_")); - } - } - for (auto &hh : hhs) { - if (h->GetXaxis()->IsAlphanumeric()) { - // must ensure bin labels match for stack - hh->GetXaxis()->SetName("xaxis"); - for (int i = 1; i <= hh->GetNbinsX(); i++) - hh->GetXaxis()->SetBinLabel(i, h->GetXaxis()->GetBinLabel(i)); - } - // automatically group hists that all have the same title - if (histGroups.find(hh->GetTitle()) == histGroups.end()) { - histGroups[hh->GetTitle()] = hh; - } else { - // add it into this group - histGroups[hh->GetTitle()]->Add(hh); - delete hh; - continue; - } - auto hhMin = (hh->GetMinimum() == 0) ? hh->GetMinimum(1e-9) : hh->GetMinimum(); - if (!stack->GetHists() && h->GetMinimum() > hhMin) { - auto newMin = hhMin - (h->GetMaximum() - hhMin) * gStyle->GetHistTopMargin(); - if (hhMin >= 0 && newMin < 0) - newMin = hhMin * 0.99; - h->SetMinimum(newMin); /// adjustYRange(newMin, h->GetMaximum()); - } - if (auto it = colorByTitle.find(hh->GetTitle()); it != colorByTitle.end()) { - hh->SetFillColor(it->second); - } else { - bool used = false; - do { - hh->SetFillColor((count++) % 100); - // check not already used this color - used = false; - for (auto hh2 : hhs) { - if (hh != hh2 && hh2->GetFillColor() == hh->GetFillColor()) { - used = true; - break; - } - } - } while (used); - colorByTitle[hh->GetTitle()] = hh->GetFillColor(); - } - /*if(stack->GetHists() && stack->GetHists()->GetEntries()>0) { - // to remove rounding effects on bin boundaries, see if binnings compatible - auto _h1 = dynamic_cast(stack->GetHists()->At(0)); - if(_h1->GetNbinsX()==hh->GetNbinsX()) TODO ... finish dealing with silly rounding effects - }*/ - TString thisOpt = ""; /// dOpt; - // uncomment next line to blend continuous with discrete components .. get some unpleasant "poke through" - // effects though - // if(auto s = samp->get(); s) thisOpt = s->isBinnedDistribution(*dynamic_cast(v)) ? 
- // "" : "LF2"; - stack->Add(hh, thisOpt); - allTitles.insert(hh->GetTitle()); - } - - TList *ll = stack->GetHists(); - if (ll && ll->GetEntries()) { - - // get common prefix to strip off only if all titles match names and - // any title is longer than 10 chars - size_t e = std::min(allTitles.begin()->size(), allTitles.rbegin()->size()); - size_t ii = 0; - bool goodPrefix = false; - std::string commonSuffix; - if (titleMatchName && ll->GetEntries() > 1) { - while (ii < e - 1 && allTitles.begin()->at(ii) == allTitles.rbegin()->at(ii)) { - ii++; - if (allTitles.begin()->at(ii) == '_' || allTitles.begin()->at(ii) == ' ') - goodPrefix = true; - } - - // find common suffix if there is one .. must start with a "_" - bool stop = false; - while (!stop && commonSuffix.size() < size_t(e - 1)) { - commonSuffix = allTitles.begin()->substr(allTitles.begin()->length() - commonSuffix.length() - 1); - for (auto &t : allTitles) { - if (!TString(t).EndsWith(commonSuffix.c_str())) { - commonSuffix = commonSuffix.substr(1); - stop = true; - break; - } - } - } - if (commonSuffix.find('_') == std::string::npos) { - commonSuffix = ""; - } else { - commonSuffix = commonSuffix.substr(commonSuffix.find('_')); - } - } - if (!goodPrefix) - ii = 0; - - // also find how many characters are needed to distinguish all entries (that dont have the same name) - // then carry on up to first space or underscore - size_t jj = 0; - std::map reducedTitles; - while (reducedTitles.size() != allTitles.size()) { - jj++; - std::map titlesMap; - for (auto &s : allTitles) { - if (reducedTitles.count(s)) - continue; - titlesMap[s.substr(0, jj)]++; - } - for (auto &s : allTitles) { - if (titlesMap[s.substr(0, jj)] == 1 && (jj >= s.length() || s.at(jj) == ' ' || s.at(jj) == '_')) { - reducedTitles[s] = s.substr(0, jj); - } - } - } - - // strip common prefix and suffix before adding - for (int i = ll->GetEntries() - 1; i >= 0; i--) { // go in reverse order - auto _title = (ll->GetEntries() > 5) ? reducedTitles[ll->At(i)->GetTitle()] : ll->At(i)->GetTitle(); - _title = _title.substr(ii < _title.size() ? ii : 0); - if (!commonSuffix.empty() && TString(_title).EndsWith(commonSuffix.c_str())) - _title = _title.substr(0, _title.length() - commonSuffix.length()); - - dynamic_cast(ll->At(i))->SetTitle(_title.c_str()); - - // style hists according to available styles ... creating if necessary - auto _style = xRooNode(*ll->At(i), *this).style(ll->At(i)); - if (_style) { - *dynamic_cast(ll->At(i)) = *_style; - *dynamic_cast(ll->At(i)) = *_style; - *dynamic_cast(ll->At(i)) = *_style; - } - // for stacks, fill color of white should be color 10 unless fill style is 0 - if (dynamic_cast(ll->At(i))->GetFillColor() == kWhite && - dynamic_cast(ll->At(i))->GetFillStyle() != 0) { - // kWhite means 'transparent' in ROOT ... 
should really use a FillStyle of 0 for that - // so assume user wanted actual white, which is color 10 - dynamic_cast(ll->At(i))->SetFillColor(10); - } - } - } - h->GetListOfFunctions()->Add(stack, "noclearsame"); - if (h->GetSumw2() && h->GetSumw2()->GetSum()) { - hCopy->SetFillStyle(3005); - hCopy->SetFillColor(h->GetLineColor()); - hCopy->SetMarkerStyle(0); - h->GetListOfFunctions()->Add(hCopy->Clone(".copy"), "e2same"); - *static_cast(hCopy) = *h; - } - } - - h->GetListOfFunctions()->Add(hCopy, "histsame"); - if (h->GetSumw2() && h->GetSumw2()->GetSum()) { - h->SetFillStyle(3005); - h->SetFillColor(h->GetLineColor()); - h->SetMarkerStyle(0); - } - } - return out; } @@ -7749,8 +7895,9 @@ xRooNode xRooNode::filter(const xRooNode &range) const return xRooNode(fComp, xRooNode(range.GetName(), nullptr, *this)); } -TH1 *xRooNode::BuildHistogram(RooAbsLValue *v, bool empty, bool errors, int binStart, int binEnd, - const xRooNode &_fr) const +TH1 *xRooNode::BuildHistogram(RooAbsLValue *v, bool empty, bool errors, int binStart, int binEnd, const xRooNode &_fr, + bool errorsHi, bool errorsLo, int nErrorToys, TH1 *templateHist, bool nostack, + bool setInterp) const { auto rar = get(); if (!rar) @@ -7775,26 +7922,41 @@ TH1 *xRooNode::BuildHistogram(RooAbsLValue *v, bool empty, bool errors, int binS // make a single-bin histogram of just this value h = new TH1D(rar->GetName(), rar->GetTitle(), 1, 0, 1); h->GetXaxis()->SetBinLabel(1, rar->GetName()); - h->GetXaxis()->SetName(rar->GetName()); + h->GetXaxis()->SetTimeFormat(rar->GetName()); } } auto x = dynamic_cast(v); bool setTitle = false; - if (x) { + if (templateHist) { + // using template hist for the binning + h = static_cast(templateHist->Clone(rar->GetName())); + if (h->GetListOfFunctions()) + h->GetListOfFunctions()->Clear(); + h->SetDirectory(0); + h->SetTitle(rar->GetTitle()); + h->Reset(); + } else if (x) { if (x == rar) { // self histogram ... h = new TH1D(rar->GetName(), rar->GetTitle(), 1, 0, 1); h->Sumw2(); h->GetXaxis()->SetBinLabel(1, rar->GetName()); h->SetBinContent(1, rar->getVal()); - if (x->hasError()) + if (x->getError()) { h->SetBinError(1, x->getError()); + h->SetFillStyle(3005); + h->SetFillColor(h->GetLineColor()); + } h->SetMaximum(x->hasMax() ? x->getMax() : (h->GetBinContent(1) + std::max(std::abs(h->GetBinContent(1) * 0.1), 50.))); h->SetMinimum(x->hasMin() ? x->getMin() : (h->GetBinContent(1) - std::max(std::abs(h->GetBinContent(1) * 0.1), 50.))); h->GetXaxis()->SetName(dynamic_cast(v)->GetName()); + h->SetOption("e2"); + h->SetMarkerSize(0); + h->SetMarkerStyle(0); + return h; } auto _ax = GetXaxis(); @@ -7828,7 +7990,7 @@ TH1 *xRooNode::BuildHistogram(RooAbsLValue *v, bool empty, bool errors, int binS } else { h = new TH1D(rar->GetName(), rar->GetTitle(), v->numBins(), x->getBinning().array()); } - + h->Sumw2(); } else if (!h) { h = new TH1D(rar->GetName(), rar->GetTitle(), v->numBins(rar->GetName()), 0, v->numBins(rar->GetName())); if (auto cat = dynamic_cast(v)) { @@ -7841,25 +8003,27 @@ TH1 *xRooNode::BuildHistogram(RooAbsLValue *v, bool empty, bool errors, int binS h->GetXaxis()->SetBinLabel(i++, label.c_str()); } } + h->Sumw2(); } if (auto o = dynamic_cast(v); o && !setTitle) { h->GetXaxis()->SetTitle(o->GetTitle()); } TH1::AddDirectory(t); - h->Sumw2(); if (v) { if (h->GetXaxis()->IsAlphanumeric()) { // store the variable name in the TimeFormat property as well, b.c. 
alphanumeric requires axis name to be // "xaxis" h->GetXaxis()->SetTimeFormat(dynamic_cast(v)->GetName()); + } else { + h->GetXaxis()->SetName(dynamic_cast(v)->GetName()); // WARNING: messes up display of bin labels } - h->GetXaxis()->SetName(dynamic_cast(v)->GetName()); // WARNING: messes up display of bin labels } - if (auto s = style(nullptr, false); s) { - static_cast(*h) = *s; - static_cast(*h) = *s; - static_cast(*h) = *s; + if (auto s = styles(nullptr, false); s) { + auto _style = s.get(); + static_cast(*h) = *_style; + static_cast(*h) = *_style; + static_cast(*h) = *_style; } if (strlen(h->GetXaxis()->GetTitle()) == 0) h->GetXaxis()->SetTitle(vv->GetTitle()); @@ -8065,6 +8229,14 @@ TH1 *xRooNode::BuildHistogram(RooAbsLValue *v, bool empty, bool errors, int binS bool scaleExpected = (p && p->canBeExtended() && !_coefs.get()); // Note about above: if pdf has coefficients then its embedded in a RooAddPdf that has coefs defined ... // in this case we should *not* scale by expected, since the coefs become the scaling instead + // we should also not build a stack for this (may be a RooRealSumPdf inside a RooAddPdf, but the + // samples of the RooRealSumPdf wont be correctly scaled to line up with overall RooRealSumPdf + // which will be normalized to its coefficient + if (!nostack && p && p->canBeExtended() && _coefs.get()) { + nostack = true; + // if wanted to still hve a stack, would need to scale the stack subcomponents by + // coefs-value / p_integral(raw) ... since raw p-integral will be what stack integrates to + } std::unique_ptr snap(normSet.snapshot()); TStopwatch timeIt; @@ -8074,87 +8246,225 @@ TH1 *xRooNode::BuildHistogram(RooAbsLValue *v, bool empty, bool errors, int binS binEnd = 1; } auto cat = (!x) ? dynamic_cast(v) : nullptr; - for (int i = std::max(1, binStart); i <= std::min(h->GetNbinsX(), binEnd); i++) { - timeIt.Start(true); - if (x) { - x->setVal(h->GetBinCenter(i)); - } else if (cat) { - cat->setLabel(h->GetXaxis()->GetBinLabel(i)); // because order might not match "binning" order - } else if (v) { - v->setBin(i - 1); - } - if (x && !x->inRange("coordRange")) - continue; + RooArgList *errorPars = nullptr; + std::unique_ptr errorParsSnap; - double r = 0; - if (!empty) { - r = /*(p && p->selfNormalized())*/ rar->getVal(p ? &normSet : nullptr); -#if ROOT_VERSION_CODE < ROOT_VERSION(6, 27, 00) - if (std::isnan(r) && RooNaNPacker::isNaNWithPayload(r)) { - r = -RooNaNPacker::unpackNaN(r); + if (!v) { + setInterp = false; + } + + if (setInterp) { + RooAbsArg *vvv = dynamic_cast(v); + // determining if histogram should have interpolation drawing options set on it + // need to strip namespace to discount the "HistFactory" namespace classes from all being treated as binned + TString clNameNoNamespace = rar->ClassName(); + clNameNoNamespace = clNameNoNamespace(clNameNoNamespace.Last(':') + 1, clNameNoNamespace.Length()); + setInterp = (clNameNoNamespace.Contains("Hist") || vvv->isCategory() || rar->isBinnedDistribution(*vvv) || + h->GetNbinsX() == 1 || rar->getAttribute("BinnedLikelihood") || + (dynamic_cast(vvv) && + std::unique_ptr>(rar->binBoundaries(*dynamic_cast(vvv), + -std::numeric_limits::infinity(), + std::numeric_limits::infinity())))) + ? 
false + : true; + if (auto d = dynamic_cast(rar); d && !d->isBinnedDistribution(*vvv) && h->GetNbinsX() != 1) { + setInterp = true; // hist func is interpolated, so draw it as such + } + if (setInterp && !components().empty()) { + // check if all components of dOpt are "Hist" type (CMS model support) + // if so then don't interp; + bool allHist = true; + for (auto &s : components()) { + TString _clName = s->get()->ClassName(); + _clName = _clName(_clName.Last(':') + 1, _clName.Length()); + if (!(s->get() && _clName.Contains("Hist"))) { + allHist = false; + break; + } } -#endif - if (r && _coefs.get()) { - r *= _coefs.get()->getVal(normSet); - } - if (needBinWidth) { - r *= h->GetBinWidth(i); - } - if (scaleExpected) { - // std::cout << r << " exp = " << p->expectedEvents(normSet) << " for normRange " << (p->normRange() ? - // p->normRange() : "null") << std::endl; p->Print();rar->Print(); - r *= (p->expectedEvents(normSet)); - } // do in here in case dependency on var - } - h->SetBinContent(i, r); - - if (errors) { - double res; - if (p) { - // std::cout << "computing error of :" << h->GetBinCenter(i) << std::endl; - // //fr->floatParsFinal().Print(); fr->covarianceMatrix().Print(); - res = PdfWrapper((oldrar) ? *rar : *p, _coefs.get(), !v, oldrar ? p : nullptr) - .getSimplePropagatedError(*fr, normSet); + if (allHist) + setInterp = false; + } + if (setInterp) { + h->SetOption("l"); // does linear interpolation between points + } + } + + if (errors) { + // may be computing potentially asymmetric errors + // the main histogram will be the error band, and the nominal histogram will be added as a function + // so that it is drawn over the top of the error band + // note that this means GetBinContent on returned histogram will return midpoint of the up and down error + auto l = static_cast(h->Clone("nominal")); + l->SetDirectory(0); + l->SetFillStyle(0); + h->GetListOfFunctions()->Add(l, (setInterp) ? "lsame" : "histsame"); + h->SetOption(setInterp ? "e3" : "e2"); // default draw option E2 or E3 so error band shown .. could have used + // 'EX0' to draw "classic style" + // could take this from the 'band' style object if we create one in future? + h->SetMarkerSize(0); + h->SetFillStyle(3005); + h->SetFillColor(h->GetLineColor()); + } + + if (nErrorToys > 0) { + errors = false; // wont evaluate error on each toy, will estimate for std.dev or normiles of toys + // need list of errorPars + auto allPars = + (!_coefs.get() ? *this : xRooNode(RooProduct("tmp", "tmp", RooArgList(*rar, *_coefs.get())))) + .pars(); + errorPars = new RooArgList; // will be in same order as appear in fr. + for (auto a : fr->floatParsFinal()) { + if (auto par = allPars.get()->find(*a)) { + errorPars->add(*par); + } + } + errorParsSnap.reset(errorPars->snapshot()); + auto l = static_cast(h->Clone("toys")); + l->Reset(); // removes any functions + l->SetDirectory(0); + h->GetListOfFunctions()->Add( + l, "histsame"); // ensures just this empty hist will be drawn, and not each individual toy + + if (errorsLo || errorsHi) + empty = false; // must not be empty b.c. 
calculation of error relies on knowing nominal (see after loop) + } + + for (int toy = 0; toy < (nErrorToys + 1); toy++) { + + TH1 *main_h = h; + if (toy > 0) { + h = static_cast(main_h->Clone(TString::Format("toy_%d", toy))); + h->SetDirectory(0); + h->Reset(); + static_cast(main_h->GetListOfFunctions()->FindObject("toys"))->GetListOfFunctions()->Add(h); + // randomize the parameter values according to the fr's covariance matrix + errorPars->assignValueOnly(fr->randomizePars()); + } + + for (int i = std::max(1, binStart); i <= std::min(h->GetNbinsX(), binEnd); i++) { + timeIt.Start(true); + if (x) { + x->setVal(h->GetBinCenter(i)); + } else if (cat) { + cat->setLabel(h->GetXaxis()->GetBinLabel(i)); // because order might not match "binning" order + } else if (v) { + v->setBin(i - 1); + } + if (x && !x->inRange("coordRange")) + continue; + + double r = 0; + if (!empty || toy > 0) { + r = /*(p && p->selfNormalized())*/ rar->getVal(p ? &normSet : nullptr); #if ROOT_VERSION_CODE < ROOT_VERSION(6, 27, 00) - // improved normSet invalidity checking, so assuming no longer need this in 6.28 onwards - p->_normSet = nullptr; + if (std::isnan(r) && RooNaNPacker::isNaNWithPayload(r)) { + r = -RooNaNPacker::unpackNaN(r); + } #endif - } else { - res = RooProduct("errorEval", "errorEval", - RooArgList(*rar, !_coefs.get() ? RooFit::RooConst(1) : *_coefs.get())) - .getPropagatedError( - *fr /*, normSet*/); // should be no need to pass a normSet to a non-pdf (but not verified this) - // especially important not to pass in the case we are evaluated RooRealSumPdf as a function! otherwise - // error will be wrong - } - if (needBinWidth) { - res *= h->GetBinWidth(i); - } - h->SetBinError(i, res); - } - timeIt.Stop(); - lapTimes.push_back(timeIt.RealTime()); - double time_estimate = - (lapTimes.size() > 1) - ? (h->GetNbinsX() * (std::accumulate(lapTimes.begin() + 1, lapTimes.end(), 0.) / (lapTimes.size() - 1))) - : 0.; - if (!warned && (lapTimes.at(0) > 10 || (lapTimes.size() > 2 && time_estimate > 60.))) { - TTimeStamp t2; - t2.Add(time_estimate); - Warning("BuildHistogram", "Building this histogram will take until %s", t2.AsString()); - if (errors) { - // install interrupt handler - runningNode = this; - gOldHandlerr = signal(SIGINT, buildHistogramInterrupt); + if (r && _coefs.get()) { + r *= _coefs.get()->getVal(normSet); + } + if (needBinWidth) { + r *= h->GetBinWidth(i); + } + if (scaleExpected) { + // std::cout << r << " exp = " << p->expectedEvents(normSet) << " for normRange " << (p->normRange() ? + // p->normRange() : "null") << std::endl; p->Print();rar->Print(); + r *= (p->expectedEvents(normSet)); + } // do in here in case dependency on var } - warned = true; - } - if (fInterrupted) { + h->SetBinContent(i, r); + if (errors) { - Warning("BuildHistogram", "Skipping errors for remaining bins"); - errors = false; + static_cast(h->FindObject("nominal"))->SetBinContent(i, r); // transfer nominal to nominal hist + double res; + bool doAsym = (errorsHi && errorsLo); + if (doAsym) { + errorsHi = false; + } + if (p) { + // std::cout << "computing error of :" << h->GetBinCenter(i) << std::endl; + // //fr->floatParsFinal().Print(); fr->covarianceMatrix().Print(); + // res = PdfWrapper((oldrar) ? *rar : *p, _coefs.get(), !v, oldrar ? p : nullptr) + // .getSimplePropagatedError(*fr, normSet); + res = new_getPropagatedError( + PdfWrapper((oldrar) ? *rar : *p, _coefs.get(), !v, oldrar ? 
p : nullptr), *fr, normSet, + &errorPars, errorsHi, errorsLo); +#if ROOT_VERSION_CODE < ROOT_VERSION(6, 27, 00) + // improved normSet invalidity checking, so assuming no longer need this in 6.28 onwards + p->_normSet = nullptr; +#endif + } else { + // res = RooProduct("errorEval", "errorEval", + // RooArgList(*rar, !_coefs.get() ? RooFit::RooConst(1) : + // *_coefs.get())) + // .getPropagatedError( + // *fr /*, normSet*/); // should be no need to pass a normSet to a non-pdf (but + // not verified this) + res = new_getPropagatedError( + RooProduct("errorEval", "errorEval", + RooArgList(*rar, !_coefs.get() ? RooFit::RooConst(1) : *_coefs.get())), + *fr, {}, &errorPars, errorsHi, + errorsLo); // should be no need to pass a normSet to a non-pdf (but not verified this) + // especially important not to pass in the case we are evaluated RooRealSumPdf as a function! otherwise + // error will be wrong + } + if (needBinWidth) { + res *= h->GetBinWidth(i); + } + h->SetBinError(i, res); + if (doAsym) { + // compute Hi error + errorsHi = true; + errorsLo = false; + if (p) { + res = new_getPropagatedError( + PdfWrapper((oldrar) ? *rar : *p, _coefs.get(), !v, oldrar ? p : nullptr), *fr, normSet, + &errorPars, errorsHi, errorsLo); + } else { + res = new_getPropagatedError( + RooProduct("errorEval", "errorEval", + RooArgList(*rar, !_coefs.get() ? RooFit::RooConst(1) : *_coefs.get())), + *fr, {}, &errorPars, errorsHi, errorsLo); + } + if (needBinWidth) { + res *= h->GetBinWidth(i); + } + errorsLo = true; + // lowVal = content - error, highVal = content + res + // => band/2 = (res+error)/2 and band-mid = (2*content+res-error)/2 + h->SetBinContent(i, h->GetBinContent(i) + (res - h->GetBinError(i)) * 0.5); + h->SetBinError(i, (res + h->GetBinError(i)) * 0.5); + } + } + timeIt.Stop(); + lapTimes.push_back(timeIt.RealTime()); + double time_estimate = + (lapTimes.size() > 1) + ? (h->GetNbinsX() * (std::accumulate(lapTimes.begin() + 1, lapTimes.end(), 0.) / (lapTimes.size() - 1))) + : 0.; + if (!warned && (lapTimes.at(0) > 10 || (lapTimes.size() > 2 && time_estimate > 60.))) { + TTimeStamp t2; + t2.Add(time_estimate); + Warning("BuildHistogram", "Building this histogram will take until %s", t2.AsString()); + if (errors) { + // install interrupt handler + runningNode = this; + gOldHandlerr = signal(SIGINT, buildHistogramInterrupt); + } + warned = true; + } + if (fInterrupted) { + if (errors) { + Warning("BuildHistogram", "Skipping errors for remaining bins"); + errors = false; + } + fInterrupted = false; } - fInterrupted = false; + } + if (toy > 0) { + h = main_h; } } if (gOldHandlerr) { @@ -8163,6 +8473,45 @@ TH1 *xRooNode::BuildHistogram(RooAbsLValue *v, bool empty, bool errors, int binS } normSet = *snap; + if (errorPars) { + if (errorParsSnap) + *errorPars = *errorParsSnap; + delete errorPars; + } + if (nErrorToys) { + // compute main histogram error bar from toys + // if not doing asymmetric, then will display std.dev + // otherwise will copy main to nominal and make main error bar s.t. 
it shows +/-1sigma vals + if (errorsLo && errorsHi) { + auto nomHist = static_cast(h->FindObject("nominal")); + nomHist->Add(h); + } + for (int i = 1; i <= h->GetNbinsX(); i++) { + std::vector vals; + vals.reserve(nErrorToys); + for (int j = 1; j < (nErrorToys + 1); j++) { + vals.push_back( + static_cast(h->FindObject("toys")->FindObject(TString::Format("toy_%d", j)))->GetBinContent(i)); + } + double upVal, downVal; + if (errorsLo || errorsHi) { + std::sort(vals.begin(), vals.end()); + upVal = vals.at(std::round(vals.size() * ROOT::Math::gaussian_cdf(1))); + downVal = vals.at(std::round(vals.size() * ROOT::Math::gaussian_cdf(-1))); + if (!errorsLo) + downVal = 2. * h->GetBinContent(i) - upVal; + if (!errorsHi) + upVal = 2. * h->GetBinContent(i) - downVal; + } else { + double err = TMath::StdDev(vals.begin(), vals.end()); + upVal = h->GetBinContent(i) + err; + downVal = h->GetBinContent(i) - err; + } + h->SetBinContent(i, (upVal + downVal) * 0.5); + h->SetBinError(i, (upVal - downVal) * 0.5); + } + } + if (oldrar) { std::vector extra; if (auto s = dynamic_cast(rar)) { @@ -8193,7 +8542,287 @@ TH1 *xRooNode::BuildHistogram(RooAbsLValue *v, bool empty, bool errors, int binS if (errors) { delete fr; } - //} + + // build a stack unless not requested + if (!nostack) { + // need to draw copy of hist so shown over the stack + auto hCopy = static_cast(h->Clone("copy")); + hCopy->Reset(); + hCopy->Add(h); // use Reset and Add to clear the function list (dont clear directly as may double-delete if same + // object added twice) + hCopy->SetStats(false); + h->GetListOfFunctions()->Add(hCopy, TString(h->GetOption()) + "same"); + h->GetListOfFunctions()->Add(hCopy, "axissame"); // prevents stack covering axis + TString dOpt = (setInterp) ? "LF2" : ""; // should become lf2 if interpolation of histogram is appropriate + + const xRooNode *rarNode = this; + RooAbsReal *sf = nullptr; + if (get()->InheritsFrom("RooExtendPdf")) { + const_cast(this)->browse(); + rarNode = find(".pdf").get(); + // rar = rarNode->get(); + sf = find(".n")->get(); + } + + THStack *stack = new THStack("stack", TString::Format("%s;%s", rar->GetTitle(), h->GetXaxis()->GetTitle())); + int count = 2; + std::map colorByTitle; // TODO: should fill from any existing legend + std::set allTitles; + bool titleMatchName = true; + std::map histGroups; + std::vector hhs; + std::set histsWithBadTitles; // these histograms will have their titles autoFormatted + + // support for CMS model case where has single component containing many coeffs + // will build stack by setting each coeff equal to 0 in turn, rebuilding the histogram + // the difference from the "full" histogram will be the component + RooArgList cms_coefs; + if (!rarNode->components().empty()) { + auto comps = rarNode->components()[0]; + for (auto &c : *comps) { + if (c->fFolder == "!.coeffs") + cms_coefs.add(*c->get()); + } + } + if (!cms_coefs.empty()) { + RooRealVar zero("zero", "", 0); + std::shared_ptr prevHist(static_cast(h->Clone())); + prevHist->Reset(); + prevHist->Add(h); + for (auto c : cms_coefs) { + // seems I have to remake the function each time, as haven't figured out what cache needs clearing? 
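// ---------------------------------------------------------------------------
// Hedged aside, not from the original patch: the loop below peels off one CMS-style
// coefficient at a time by redirecting it to a constant zero on a clone of the
// component function. The ORIGNAME attribute tells redirectServers (with
// nameChange=true) which original server the replacement stands in for. Sketch,
// with an invented helper name; `zero` must outlive any later evaluation of the clone:
#include "RooRealVar.h"
#include "RooAbsReal.h"
#include "RooArgSet.h"
#include "TString.h"

inline void zeroOneCoefficient(RooAbsReal &funcClone, RooRealVar &zero, const char *coefName)
{
   // mark which server "zero" replaces, then redirect by ORIGNAME match
   zero.setAttribute(TString::Format("ORIGNAME:%s", coefName));
   funcClone.redirectServers(RooArgSet(zero), /*mustReplaceAll=*/false, /*nameChange=*/true);
}
// ---------------------------------------------------------------------------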
+ std::unique_ptr f( + dynamic_cast(rarNode->components()[0]->get()->Clone("tmpCopy"))); + zero.setAttribute(Form("ORIGNAME:%s", c->GetName())); // used in redirectServers to say what this replaces + f->redirectServers(RooArgSet(zero), false, true); // each time will replace one additional coef + // zero.setAttribute(Form("ORIGNAME:%s",c->GetName()),false); (commented out so that on next iteration + // will still replace all prev) + auto hh = xRooNode(*f, *this).BuildHistogram(v); + hh->SetName(c->GetName()); + if (sf) + hh->Scale(sf->getVal()); + if (strlen(hh->GetTitle()) == 0) { + hh->SetTitle(c->GetName()); // ensure all hists has titles + histsWithBadTitles.insert(hh); + } else if (strcmp(hh->GetName(), hh->GetTitle()) == 0) { + histsWithBadTitles.insert(hh); + } + titleMatchName &= (TString(c->GetName()) == hh->GetTitle() || + TString(hh->GetTitle()).BeginsWith(TString(c->GetName()) + "_")); + std::shared_ptr nextHist(static_cast(hh->Clone())); + hh->Add(prevHist.get(), -1.); + hh->Scale(-1.); + hhs.push_back(hh); + prevHist = nextHist; + } + } else if (get()) { + // need to create a histogram for each sample across all the channels - will rely on functionality below to + // merge them based on titles + + for (auto &chan : bins()) { + TString chanName(chan->GetName()); + chanName = chanName(chanName.Index("=") + 1, chanName.Length()); + auto samps = chan->mainChild(); + if (!samps) + samps = *chan; + for (auto &samp : samps.components()) { + auto hh = static_cast(h->Clone(samp->GetName())); + hh->Reset(); + hh->SetTitle(samp->GetTitle()); + if (strlen(hh->GetTitle()) == 0) { + hh->SetTitle(samp->GetName()); + histsWithBadTitles.insert(hh); + } else if (strcmp(hh->GetName(), hh->GetTitle()) == 0) { + histsWithBadTitles.insert(hh); + } + hh->SetTitle(TString(hh->GetTitle()) + .ReplaceAll(TString(chan->get()->GetName()) + "_", + "")); // remove occurance of channelname_ in title (usually prefix) + titleMatchName &= (TString(samp->GetName()) == hh->GetTitle() || + TString(hh->GetTitle()).BeginsWith(TString(samp->GetName()) + "_")); + hh->SetBinContent(hh->GetXaxis()->FindFixBin(chanName), samp->GetContent()); + hhs.push_back(hh); + } + } + } else { + for (auto &samp : rarNode->components()) { + auto hh = samp->BuildHistogram( + v, empty, false /* no errors for stack*/, binStart, binEnd, _fr, false, false, 0, h, true, + setInterp); // passing h to ensure binning is the same for all subcomponent hists + hh->SetName(samp->GetName()); + if (sf) + hh->Scale(sf->getVal()); + hhs.push_back(hh); + if (strlen(hh->GetTitle()) == 0) { + hh->SetTitle(samp->GetName()); // ensure all hists has titles + histsWithBadTitles.insert(hh); + } else if (strcmp(hh->GetName(), hh->GetTitle()) == 0) { + histsWithBadTitles.insert(hh); + } + titleMatchName &= (TString(samp->GetName()) == hh->GetTitle() || + TString(hh->GetTitle()).BeginsWith(TString(samp->GetName()) + "_")); + } + } + + if (!hhs.empty()) { + for (auto &hh : hhs) { + allTitles.insert(hh->GetTitle()); + } + + // get common prefix to strip off only if all titles match names and + // any title is longer than 10 chars + size_t e = std::min(allTitles.begin()->size(), allTitles.rbegin()->size()); + size_t ii = 0; + bool goodPrefix = false; + std::string commonSuffix; + if (titleMatchName && hhs.size() > 1) { + while (ii < e - 1 && allTitles.begin()->at(ii) == allTitles.rbegin()->at(ii)) { + ii++; + if (allTitles.begin()->at(ii) == '_' || allTitles.begin()->at(ii) == ' ') + goodPrefix = true; + } + + // find common suffix if there is one .. 
must start with a "_" + bool stop = false; + while (!stop && commonSuffix.size() < size_t(e - 1)) { + commonSuffix = allTitles.begin()->substr(allTitles.begin()->length() - commonSuffix.length() - 1); + for (auto &tt : allTitles) { + if (!TString(tt).EndsWith(commonSuffix.c_str())) { + commonSuffix = commonSuffix.substr(1); + stop = true; + break; + } + } + } + if (commonSuffix.find('_') == std::string::npos) { + commonSuffix = ""; + } else { + commonSuffix = commonSuffix.substr(commonSuffix.find('_')); + } + } + if (!goodPrefix) + ii = 0; + + // also find how many characters are needed to distinguish all entries (that dont have the same name) + // then carry on up to first space or underscore + size_t jj = 0; + std::map reducedTitles; + while (reducedTitles.size() != allTitles.size()) { + jj++; + std::map titlesMap; + for (auto &s : allTitles) { + if (reducedTitles.count(s)) + continue; + titlesMap[s.substr(0, jj)]++; + } + for (auto &s : allTitles) { + if (titlesMap[s.substr(0, jj)] == 1 && (jj >= s.length() || s.at(jj) == ' ' || s.at(jj) == '_')) { + reducedTitles[s] = s.substr(0, jj); + } + } + } + + // strip common prefix and suffix before adding + for (auto ritr = hhs.rbegin(); ritr != hhs.rend(); ++ritr) { // go in reverse order + if (!histsWithBadTitles.count((*ritr))) { + continue; + } + auto _title = (hhs.size() > 5) ? reducedTitles[(*ritr)->GetTitle()] : (*ritr)->GetTitle(); + _title = _title.substr(ii < _title.size() ? ii : 0); + if (!commonSuffix.empty() && TString(_title).EndsWith(commonSuffix.c_str())) + _title = _title.substr(0, _title.length() - commonSuffix.length()); + (*ritr)->SetTitle(_title.c_str()); + } + } + + for (auto &hh : hhs) { + // automatically group hists that all have the same title + if (histGroups.find(hh->GetTitle()) == histGroups.end()) { + histGroups[hh->GetTitle()] = hh; + } else { + // add it into this group + histGroups[hh->GetTitle()]->Add(hh); + delete hh; + hh = nullptr; + continue; + } + auto hhMin = (hh->GetMinimum() == 0) ? hh->GetMinimum(1e-9) : hh->GetMinimum(); + if (!stack->GetHists() && h->GetMinimum() > hhMin) { + auto newMin = hhMin - (h->GetMaximum() - hhMin) * gStyle->GetHistTopMargin(); + if (hhMin >= 0 && newMin < 0) + newMin = hhMin * 0.99; + // adjustYRange(newMin, h->GetMaximum()); + } + + /*if(stack->GetHists() && stack->GetHists()->GetEntries()>0) { + // to remove rounding effects on bin boundaries, see if binnings compatible + auto _h1 = dynamic_cast(stack->GetHists()->At(0)); + if(_h1->GetNbinsX()==hh->GetNbinsX()) TODO ... finish dealing with silly rounding effects + }*/ + TString thisOpt = TString(hh->GetOption()) == "l" ? "LF2" : ""; // need LF2 to get smooth line with fill + // uncomment next line to blend continuous with discrete components .. get some unpleasant "poke through" + // effects though + // if(auto s = samp->get(); s) thisOpt = s->isBinnedDistribution(*dynamic_cast(v)) ? + // "" : "LF2"; + stack->Add(hh, thisOpt); + } + // stack->SetBit(kCanDelete); // should delete its sub histograms + h->GetListOfFunctions()->AddFirst(stack, "noclear same"); + // stack->Draw("noclear same"); + // h->Draw( + // dOpt + sOpt + + // "same"); // overlay again .. 
if stack would cover original hist (negative components) we still see + // integral + // h->Draw("axissame"); // redraws axis + + TList *ll = stack->GetHists(); + if (ll && ll->GetEntries()) { + + // finally, ensure all hists are styled + for (auto ho : *ll) { + TH1 *hh = dynamic_cast(ho); + if (!hh) + continue; + bool createdStyle = (xRooNode(*hh, *this).styles(nullptr, false).get() == nullptr); + + if (createdStyle) { + // give hist a color, that isn't the same as any other hists color + hh->SetFillStyle(1001); // solid fill style + bool used = false; + do { + hh->SetFillColor((count++)); + // check not already used this color + used = false; + for (auto ho2 : *ll) { + TH1 *hh2 = dynamic_cast(ho2); + if (!hh2) + continue; + auto _styleNode = xRooNode(*hh2, *this).styles(hh2, false); + auto _style = _styleNode.get(); + if (hh != hh2 && _style && _style->GetFillColor() == hh->GetFillColor()) { + used = true; + break; + } + } + } while (used); + } + + auto _styleNode = xRooNode(*hh, *this).styles(hh); + if (auto _style = _styleNode.get()) { + *dynamic_cast(hh) = *_style; + *dynamic_cast(hh) = *_style; + *dynamic_cast(hh) = *_style; + } + // for stacks, fill color of white should be color 10 unless fill style is 0 + if (hh->GetFillColor() == kWhite && hh->GetFillStyle() != 0) { + // kWhite means 'transparent' in ROOT ... should really use a FillStyle of 0 for that + // so assume user wanted actual white, which is color 10 + hh->SetFillColor(10); + } + } + } + } + return h; } @@ -8521,7 +9150,9 @@ void xRooNode::Draw(Option_t *opt) if (!get() && !IsFolder() && !sOpt2.Contains("x=")) return; - if (auto ir = get()) { + if (auto mc = get()) { + xRooNode(*mc->GetPdf(), fParent).Draw(opt); // draw the pdf of the config + } else if (auto ir = get()) { xRooHypoSpace(ir).Draw(opt); gSystem->ProcessEvents(); return; @@ -8956,12 +9587,12 @@ void xRooNode::Draw(Option_t *opt) // int n = _size; // Int_t w = 1, h = 1; // if (pad->GetCanvas()->GetWindowWidth() > pad->GetCanvas()->GetWindowHeight()) { - // w = std::ceil(std::sqrt(n)); - // h = std::floor(std::sqrt(n)); + // w = TMath::Ceil(TMath::Sqrt(n)); + // h = TMath::Floor(TMath::Sqrt(n)); // if (w*h < n) w++; // } else { - // h = std::ceil(std::sqrt(n)); - // w = std::floor(std::sqrt(n)); + // h = TMath::Ceil(TMath::Sqrt(n)); + // w = TMath::Floor(TMath::Sqrt(n)); // if (w*h < n) h++; // } // // adjust the window size to display only 4 in the window, with scroll bars @@ -9085,10 +9716,13 @@ void xRooNode::Draw(Option_t *opt) } if (get()->InheritsFrom("RooProdPdf")) { - // draw the main pdf ... - mainChild().Draw(opt); - gPad->SetName(GetName()); - return; + // draw the main pdf, if there is one... + auto _mainChild = mainChild(); + if (_mainChild) { + _mainChild.Draw(opt); + gPad->SetName(GetName()); + return; + } } if (auto fr = get(); fr) { @@ -9709,6 +10343,7 @@ void xRooNode::Draw(Option_t *opt) graph->SetEditable(false); pNamesHist->SetLineWidth(0); pNamesHist->SetMarkerSize(0); + pNamesHist->SetMarkerStyle(0); graph->GetListOfFunctions()->Add(pNamesHist, "same"); // graph->SetHistogram(pNamesHist); if (doHorizontal) { @@ -9781,7 +10416,7 @@ void xRooNode::Draw(Option_t *opt) auto s = parentPdf(); if (s && s->get()) { // drawing dataset associated to a simultaneous means must find subpads with variation names - // may not have subpads if drawing a "Yield" plot ... + // may not have subpads if drawning a "Yield" plot ... 
bool doneDraw = false; for (auto c : s->bins()) { auto _pad = dynamic_cast(gPad->GetPrimitive(c->GetName())); @@ -9997,9 +10632,9 @@ void xRooNode::Draw(Option_t *opt) auto val = _nll.pars()->getRealValue(initPar->GetName()); if (ii > 1) _nll.pars()->setRealValue(initPar->GetName(), valueToDo); - auto _extTerm = _nll.extendedTerm(); + auto _extTerm = _nll.extendedTermVal(); _nll.pars()->setRealValue(initPar->GetName(), initPar->getVal()); - auto _extTerm2 = _nll.extendedTerm(); + auto _extTerm2 = _nll.extendedTermVal(); _nll.pars()->setRealValue(initPar->GetName(), val); for (int i = 1; i <= emptyHist->GetNbinsX(); i++) { emptyHist->SetBinContent(i, @@ -10025,20 +10660,36 @@ void xRooNode::Draw(Option_t *opt) auto rar = get(); const xRooNode *rarNode = this; if (!rar) { - get()->Draw(); + // draw a deleteable clone of the object we wrap (since we might own the object) + get()->DrawClone(opt); return; } - RooAbsReal *sf = nullptr; + // RooAbsReal *sf = nullptr; if (get()->InheritsFrom("RooExtendPdf")) { browse(); rarNode = find(".pdf").get(); // rar = rarNode->get(); - sf = find(".n")->get(); + // sf = find(".n")->get(); + } + + if (!nostack && !hasOverlay && + (rarNode->get()->InheritsFrom("RooRealSumPdf") || rarNode->get()->InheritsFrom("RooAddPdf") || + (v && rarNode->get()->InheritsFrom("RooSimultaneous") && + strcmp(dynamic_cast(v)->GetName(), rarNode->get()->indexCat().GetName()) == 0))) { + nostack = false; + } else { + // in all other cases, we do not build a stack + nostack = true; } - auto h = BuildHistogram(v, false, hasErrorOpt); - if (!h) + auto h = BuildHistogram(v, false, hasErrorOpt, 1, 0, "", false, false, 0, nullptr, nostack, true /*setInterp*/); + if (!h) { + if (get()) { + // draw a deleteable clone of the object we wrap (since we might own the object) + get()->DrawClone(opt); + } return; + } h->SetBit(kCanDelete); if (!v) @@ -10051,6 +10702,9 @@ void xRooNode::Draw(Option_t *opt) h->GetXaxis()->SetName("xaxis"); // WARNING -- this messes up anywhere we GetXaxis()->GetName() } + // get style now, before we mess with histogram title + // auto _styleNode = styles(h); + if (rar->InheritsFrom("RooAbsPdf") && !(rar->InheritsFrom("RooRealSumPdf") || rar->InheritsFrom("RooAddPdf") || rar->InheritsFrom("RooSimultaneous"))) { // append parameter values to title if has such @@ -10086,35 +10740,53 @@ void xRooNode::Draw(Option_t *opt) gPad->SetGrid(1, 1); } } - // need to strip namespace to discount the "HistFactory" namespace classes from all being treated as binned - TString clNameNoNamespace = rar->ClassName(); - clNameNoNamespace = clNameNoNamespace(clNameNoNamespace.Last(':') + 1, clNameNoNamespace.Length()); - TString dOpt = (clNameNoNamespace.Contains("Hist") || vv->isCategory() || rar->isBinnedDistribution(*vv) || - h->GetNbinsX() == 1 || rar->getAttribute("BinnedLikelihood") || - (dynamic_cast(vv) && - std::unique_ptr>(rar->binBoundaries(*dynamic_cast(vv), - -std::numeric_limits::infinity(), - std::numeric_limits::infinity())))) - ? 
"" - : "LF2"; - if (auto d = dynamic_cast(rar); d && !d->isBinnedDistribution(*vv) && h->GetNbinsX() != 1) { - dOpt = "LF2"; // hist func is interpolated, so draw it as such - } - if (dOpt == "LF2" && !components().empty()) { - // check if all components of dOpt are "Hist" type (CMS model support) - // if so then dOpt=""; - bool allHist = true; - for (auto &s : components()) { - TString _clName = s->get()->ClassName(); - _clName = _clName(_clName.Last(':') + 1, _clName.Length()); - if (!(s->get() && _clName.Contains("Hist"))) { - allHist = false; - break; - } - } - if (allHist) - dOpt = ""; - } + TString dOpt = h->GetOption(); + if (dOpt == "l") + h->SetFillStyle(0); + // // need to strip namespace to discount the "HistFactory" namespace classes from all being treated as binned + // TString clNameNoNamespace = rar->ClassName(); + // clNameNoNamespace = clNameNoNamespace(clNameNoNamespace.Last(':') + 1, clNameNoNamespace.Length()); + // TString dOpt = (clNameNoNamespace.Contains("Hist") || vv->isCategory() || rar->isBinnedDistribution(*vv) || + // h->GetNbinsX() == 1 || rar->getAttribute("BinnedLikelihood") || + // (dynamic_cast(vv) && + // std::unique_ptr>(rar->binBoundaries(*dynamic_cast(vv), + // -std::numeric_limits::infinity(), + // std::numeric_limits::infinity())))) + // ? "" + // : "LF2"; + // if (auto d = dynamic_cast(rar); d && !d->isBinnedDistribution(*vv) && h->GetNbinsX() != 1) { + // dOpt = "LF2"; // hist func is interpolated, so draw it as such + // } + // if (dOpt == "LF2" && !components().empty()) { + // // check if all components of dOpt are "Hist" type (CMS model support) + // // if so then dOpt=""; + // bool allHist = true; + // for (auto &s : components()) { + // TString _clName = s->get()->ClassName(); + // _clName = _clName(_clName.Last(':') + 1, _clName.Length()); + // if (!(s->get() && _clName.Contains("Hist"))) { + // allHist = false; + // break; + // } + // } + // if (allHist) + // dOpt = ""; + // } + // + // if(dOpt=="LF2") { + // // ensure any sub hists have lf2 option + // TObjLink *lnk = h->GetListOfFunctions()->FirstLink(); + // while (lnk) { + // if(auto hh = dynamic_cast(lnk->GetObject())) { + // if(TString(hh->GetName())=="band" && TString(lnk->GetOption())=="e2same") { + // lnk->SetOption("LF2 e3same"); + // } else if(TString(hh->GetName())=="nominal") { + // lnk->SetOption("L same"); + // } + // } + // lnk = lnk->Next(); + // } + // } if (rar == vv && rar->IsA() == RooRealVar::Class()) { dOpt += "TEXT"; @@ -10153,18 +10825,6 @@ void xRooNode::Draw(Option_t *opt) } TH1 *errHist = nullptr; - if (hasError) { - h->SetFillStyle(hasError ? 3005 : 0); - h->SetFillColor(h->GetLineColor()); - h->SetMarkerStyle(0); - errHist = dynamic_cast(h->Clone(Form("%s_err", h->GetName()))); - errHist->SetBit(kCanDelete); - errHist->SetDirectory(nullptr); - h->SetFillStyle(0); - for (int i = 1; i <= h->GetNbinsX(); i++) { - h->SetBinError(i, 0); - } - } if (!hasSame) clearPad(); @@ -10176,8 +10836,11 @@ void xRooNode::Draw(Option_t *opt) auto node = new xRooNode(*this); auto _hist = (errHist) ? errHist : h; auto hCopy = (errHist) ? 
nullptr : dynamic_cast(h->Clone()); - if (hCopy) + if (hCopy) { + hCopy->Reset(); + hCopy->Add(_hist); hCopy->SetDirectory(nullptr); + } _hist->GetListOfFunctions()->Add(node); _hist->GetListOfFunctions()->Add(new TExec( ".update", @@ -10197,12 +10860,13 @@ void xRooNode::Draw(Option_t *opt) errHist->SetFillColor(h->GetLineColor()); } else { hCopy->SetBit(kCanDelete); + hCopy->SetFillStyle(0); _hist->GetListOfFunctions()->Add(hCopy, "TEXT HIST same"); - _hist->SetBinError(1, 0); + //_hist->SetBinError(1, 0); } _hist->SetStats(false); // if (_hist->GetBinContent(1)==0.) _hist->SetBinContent(1,(_hist->GetMaximum()-_hist->GetMinimum())*0.005); - _hist->Draw(((errHist) ? "e2" : "")); + _hist->Draw(); //_hist->Draw(((hasError) ? "e2" : "")); gPad->Modified(); return; } @@ -10221,7 +10885,10 @@ void xRooNode::Draw(Option_t *opt) h->SetTitle(overlayName); // for overlays will take style from current gStyle before overriding with personal style // this ensures initial style will be whatever gStyle is, rather than whatever ours is - (TAttLine &)(*h) = *gStyle; + static_cast(*h) = *gStyle; + static_cast(*h) = *gStyle; + static_cast(*h) = *gStyle; + h->SetFillStyle(0); // explicit default for overlays will be transparent fill // std::shared_ptr style; // use to keep alive for access from GetStyle below, in case // getObject has decided to return the owning ptr (for some reason) if @@ -10243,14 +10910,14 @@ void xRooNode::Draw(Option_t *opt) // (TAttLine&)(*h) = *(gROOT->GetStyle(h->GetTitle()) ? gROOT->GetStyle(h->GetTitle()) : gStyle); // (TAttFill&)(*h) = *(gROOT->GetStyle(h->GetTitle()) ? gROOT->GetStyle(h->GetTitle()) : gStyle); // (TAttMarker&)(*h) = *(gROOT->GetStyle(h->GetTitle()) ? gROOT->GetStyle(h->GetTitle()) : gStyle); - auto _style = style(h); + auto _styleNode = styles(h); rar->setStringAttribute("style", oldStyle == "" ? nullptr : oldStyle.Data()); // restores old style - if (_style) { + if (auto _style = _styleNode.get()) { (TAttLine &)(*h) = *_style; (TAttFill &)(*h) = *_style; (TAttMarker &)(*h) = *_style; } - h->Draw(dOpt); + h->Draw(dOpt == "LF2" ? 
"e3" : dOpt); if (errHist) { errHist->SetTitle(overlayName); (TAttLine &)(*errHist) = *h; @@ -10258,283 +10925,310 @@ void xRooNode::Draw(Option_t *opt) } } } else { - auto _style = style(h); - if (_style) { - (TAttLine &)(*h) = *_style; - (TAttFill &)(*h) = *_style; - (TAttMarker &)(*h) = *_style; - if (errHist) { - (TAttLine &)(*errHist) = *h; - errHist->SetFillColor(h->GetLineColor()); - } - } - h->Draw(dOpt + sOpt); + // if (auto _style = _styleNode.get()) { + // (TAttLine &)(*h) = *_style; + // (TAttFill &)(*h) = *_style; + // (TAttMarker &)(*h) = *_style; + // if (errHist) { + // (TAttLine &)(*errHist) = *h; + // errHist->SetFillColor(h->GetLineColor()); + // } + // } + h->Draw(dOpt); } if (!hasOverlay && (rarNode->get()->InheritsFrom("RooRealSumPdf") || rarNode->get()->InheritsFrom("RooAddPdf") || (rarNode->get()->InheritsFrom("RooSimultaneous") && strcmp(vv->GetName(), rarNode->get()->indexCat().GetName()) == 0))) { - // build a stack unless not requested - if (!nostack) { - THStack *stack = new THStack(TString::Format("%s_stack", rar->GetName()), - TString::Format("%s;%s", rar->GetTitle(), h->GetXaxis()->GetTitle())); - int count = 2; - std::map colorByTitle; // TODO: should fill from any existing legend - std::set allTitles; - bool titleMatchName = true; - std::map histGroups; - std::vector hhs; - std::set histsWithBadTitles; // these histograms will have their titles autoFormatted - - // support for CMS model case where has single component containing many coeffs - // will build stack by setting each coeff equal to 0 in turn, rebuilding the histogram - // the difference from the "full" histogram will be the component - RooArgList cms_coefs; - if (!rarNode->components().empty()) { - auto comps = rarNode->components()[0]; - for (auto &c : *comps) { - if (c->fFolder == "!.coeffs") - cms_coefs.add(*c->get()); - } - } - if (!cms_coefs.empty()) { - RooRealVar zero("zero", "", 0); - std::shared_ptr prevHist(static_cast(h->Clone())); - for (auto c : cms_coefs) { - // seems I have to remake the function each time, as haven't figured out what cache needs clearing? 
- std::unique_ptr f( - dynamic_cast(rarNode->components()[0]->get()->Clone("tmpCopy"))); - zero.setAttribute( - Form("ORIGNAME:%s", c->GetName())); // used in redirectServers to say what this replaces - f->redirectServers(RooArgSet(zero), false, true); // each time will replace one additional coef - // zero.setAttribute(Form("ORIGNAME:%s",c->GetName()),false); (commented out so that on next iteration - // will still replace all prev) - auto hh = xRooNode(*f, *this).BuildHistogram(v); - hh->SetName(c->GetName()); - if (sf) - hh->Scale(sf->getVal()); - if (strlen(hh->GetTitle()) == 0) { - hh->SetTitle(c->GetName()); // ensure all hists has titles - histsWithBadTitles.insert(hh); - } else if (strcmp(hh->GetName(), hh->GetTitle()) == 0) { - histsWithBadTitles.insert(hh); - } - titleMatchName &= (TString(c->GetName()) == hh->GetTitle() || - TString(hh->GetTitle()).BeginsWith(TString(c->GetName()) + "_")); - std::shared_ptr nextHist(static_cast(hh->Clone())); - hh->Add(prevHist.get(), -1.); - hh->Scale(-1.); - hhs.push_back(hh); - prevHist = nextHist; - } - } else if (get()) { - // need to create a histogram for each sample across all the channels - will rely on functionality below to - // merge them based on titles - - for (auto &chan : bins()) { - TString chanName(chan->GetName()); - chanName = chanName(chanName.Index("=") + 1, chanName.Length()); - auto samps = chan->mainChild(); - if (!samps) - samps = *chan; - for (auto &samp : samps.components()) { - auto hh = static_cast(h->Clone(samp->GetName())); - hh->Reset(); - hh->SetTitle(samp->GetTitle()); - if (strlen(hh->GetTitle()) == 0) { - hh->SetTitle(samp->GetName()); - histsWithBadTitles.insert(hh); - } else if (strcmp(hh->GetName(), hh->GetTitle()) == 0) { - histsWithBadTitles.insert(hh); - } - hh->SetTitle(TString(hh->GetTitle()) - .ReplaceAll(TString(chan->get()->GetName()) + "_", - "")); // remove occurrence of channelname_ in title (usually prefix) - titleMatchName &= (TString(samp->GetName()) == hh->GetTitle() || - TString(hh->GetTitle()).BeginsWith(TString(samp->GetName()) + "_")); - hh->SetBinContent(hh->GetXaxis()->FindFixBin(chanName), samp->GetContent()); - hhs.push_back(hh); - } - } - } else { - for (auto &samp : rarNode->components()) { - auto hh = samp->BuildHistogram(v); - if (sf) - hh->Scale(sf->getVal()); - hhs.push_back(hh); - if (strlen(hh->GetTitle()) == 0) { - hh->SetTitle(samp->GetName()); // ensure all hists has titles - histsWithBadTitles.insert(hh); - } else if (strcmp(hh->GetName(), hh->GetTitle()) == 0) { - histsWithBadTitles.insert(hh); - } - titleMatchName &= (TString(samp->GetName()) == hh->GetTitle() || - TString(hh->GetTitle()).BeginsWith(TString(samp->GetName()) + "_")); - } - } - - if (!hhs.empty()) { - for (auto &hh : hhs) { - allTitles.insert(hh->GetTitle()); - } - - // get common prefix to strip off only if all titles match names and - // any title is longer than 10 chars - size_t e = std::min(allTitles.begin()->size(), allTitles.rbegin()->size()); - size_t ii = 0; - bool goodPrefix = false; - std::string commonSuffix; - if (titleMatchName && hhs.size() > 1) { - while (ii < e - 1 && allTitles.begin()->at(ii) == allTitles.rbegin()->at(ii)) { - ii++; - if (allTitles.begin()->at(ii) == '_' || allTitles.begin()->at(ii) == ' ') - goodPrefix = true; - } - - // find common suffix if there is one .. 
must start with a "_" - bool stop = false; - while (!stop && commonSuffix.size() < size_t(e - 1)) { - commonSuffix = allTitles.begin()->substr(allTitles.begin()->length() - commonSuffix.length() - 1); - for (auto &t : allTitles) { - if (!TString(t).EndsWith(commonSuffix.c_str())) { - commonSuffix = commonSuffix.substr(1); - stop = true; - break; - } - } - } - if (commonSuffix.find('_') == std::string::npos) { - commonSuffix = ""; - } else { - commonSuffix = commonSuffix.substr(commonSuffix.find('_')); - } - } - if (!goodPrefix) - ii = 0; - - // also find how many characters are needed to distinguish all entries (that dont have the same name) - // then carry on up to first space or underscore - size_t jj = 0; - std::map reducedTitles; - while (reducedTitles.size() != allTitles.size()) { - jj++; - std::map titlesMap; - for (auto &s : allTitles) { - if (reducedTitles.count(s)) - continue; - titlesMap[s.substr(0, jj)]++; - } - for (auto &s : allTitles) { - if (titlesMap[s.substr(0, jj)] == 1 && (jj >= s.length() || s.at(jj) == ' ' || s.at(jj) == '_')) { - reducedTitles[s] = s.substr(0, jj); - } - } - } - - // strip common prefix and suffix before adding - for (auto ritr = hhs.rbegin(); ritr != hhs.rend(); ++ritr) { // go in reverse order - if (!histsWithBadTitles.count((*ritr))) { - continue; - } - auto _title = (hhs.size() > 5) ? reducedTitles[(*ritr)->GetTitle()] : (*ritr)->GetTitle(); - _title = _title.substr(ii < _title.size() ? ii : 0); - if (!commonSuffix.empty() && TString(_title).EndsWith(commonSuffix.c_str())) - _title = _title.substr(0, _title.length() - commonSuffix.length()); - (*ritr)->SetTitle(_title.c_str()); - } - } - - for (auto &hh : hhs) { - // automatically group hists that all have the same title - if (histGroups.find(hh->GetTitle()) == histGroups.end()) { - histGroups[hh->GetTitle()] = hh; - } else { - // add it into this group - histGroups[hh->GetTitle()]->Add(hh); - delete hh; - hh = nullptr; - continue; - } + if (auto stack = dynamic_cast(h->FindObject("stack"))) { + // access the stack and set draw options, adjust ranges etc + TObjLink *lnk = stack->GetHists()->FirstLink(); + while (lnk) { + TH1 *hh = static_cast(lnk->GetObject()); + // lnk->SetOption(dOpt); - not needed auto hhMin = (hh->GetMinimum() == 0) ? hh->GetMinimum(1e-9) : hh->GetMinimum(); - if (!stack->GetHists() && h->GetMinimum() > hhMin) { + if (lnk == stack->GetHists()->FirstLink() && h->GetMinimum() > hhMin) { auto newMin = hhMin - (h->GetMaximum() - hhMin) * gStyle->GetHistTopMargin(); if (hhMin >= 0 && newMin < 0) newMin = hhMin * 0.99; adjustYRange(newMin, h->GetMaximum()); } - - /*if(stack->GetHists() && stack->GetHists()->GetEntries()>0) { - // to remove rounding effects on bin boundaries, see if binnings compatible - auto _h1 = dynamic_cast(stack->GetHists()->At(0)); - if(_h1->GetNbinsX()==hh->GetNbinsX()) TODO ... finish dealing with silly rounding effects - }*/ - TString thisOpt = dOpt; - // uncomment next line to blend continuous with discrete components .. get some unpleasant "poke through" - // effects though - // if(auto s = samp->get(); s) thisOpt = s->isBinnedDistribution(*dynamic_cast(v)) ? - // "" : "LF2"; - stack->Add(hh, thisOpt); - } - stack->SetBit(kCanDelete); // should delete its sub histograms - stack->Draw("noclear same"); - h->Draw( - dOpt + sOpt + - "same"); // overlay again .. 
if stack would cover original hist (negative components) we still see integral - h->Draw("axissame"); // redraws axis - - TList *ll = stack->GetHists(); - if (ll && ll->GetEntries()) { - - // finally, ensure all hists are styled - for (auto ho : *ll) { - TH1 *hh = dynamic_cast(ho); - if (!hh) - continue; - bool createdStyle = (xRooNode(*hh, *this).style(nullptr, false) == nullptr); - - if (createdStyle) { - // give hist a color, that isn't the same as any other hists color - hh->SetFillStyle(1001); // solid fill style - bool used = false; - do { - hh->SetFillColor((count++)); - // check not already used this color - used = false; - for (auto ho2 : *ll) { - TH1 *hh2 = dynamic_cast(ho2); - if (!hh2) - continue; - auto _style = xRooNode(*hh2, *this).style(hh2, false); - if (hh != hh2 && _style && _style->GetFillColor() == hh->GetFillColor()) { - used = true; - break; - } - } - } while (used); - } - - auto _style = xRooNode(*hh, *this).style(hh); - if (_style) { - *dynamic_cast(hh) = *_style; - *dynamic_cast(hh) = *_style; - *dynamic_cast(hh) = *_style; - } - // for stacks, fill color of white should be color 10 unless fill style is 0 - if (hh->GetFillColor() == kWhite && hh->GetFillStyle() != 0) { - // kWhite means 'transparent' in ROOT ... should really use a FillStyle of 0 for that - // so assume user wanted actual white, which is color 10 - hh->SetFillColor(10); - } - addLegendEntry(hh, hh->GetTitle(), "f"); - } + addLegendEntry(hh, hh->GetTitle(), "f"); + lnk = lnk->Next(); } } + + // // build a stack unless not requested + // if (!nostack) { + // THStack *stack = new THStack(TString::Format("%s_stack", rar->GetName()), + // TString::Format("%s;%s", rar->GetTitle(), h->GetXaxis()->GetTitle())); + // int count = 2; + // std::map colorByTitle; // TODO: should fill from any existing legend + // std::set allTitles; + // bool titleMatchName = true; + // std::map histGroups; + // std::vector hhs; + // std::set histsWithBadTitles; // these histograms will have their titles autoFormatted + // + // // support for CMS model case where has single component containing many coeffs + // // will build stack by setting each coeff equal to 0 in turn, rebuilding the histogram + // // the difference from the "full" histogram will be the component + // RooArgList cms_coefs; + // if (!rarNode->components().empty()) { + // auto comps = rarNode->components()[0]; + // for (auto &c : *comps) { + // if (c->fFolder == "!.coeffs") + // cms_coefs.add(*c->get()); + // } + // } + // if (!cms_coefs.empty()) { + // RooRealVar zero("zero", "", 0); + // std::shared_ptr prevHist(static_cast(h->Clone())); + // for (auto c : cms_coefs) { + // // seems I have to remake the function each time, as haven't figured out what cache needs + // clearing? 
std::unique_ptr f( + // dynamic_cast(rarNode->components()[0]->get()->Clone("tmpCopy"))); + // zero.setAttribute( + // Form("ORIGNAME:%s", c->GetName())); // used in redirectServers to say what this + // replaces + // f->redirectServers(RooArgSet(zero), false, true); // each time will replace one additional coef + // // zero.setAttribute(Form("ORIGNAME:%s",c->GetName()),false); (commented out so that on next + // iteration + // // will still replace all prev) + // auto hh = xRooNode(*f, *this).BuildHistogram(v); + // hh->SetName(c->GetName()); + // if (sf) + // hh->Scale(sf->getVal()); + // if (strlen(hh->GetTitle()) == 0) { + // hh->SetTitle(c->GetName()); // ensure all hists has titles + // histsWithBadTitles.insert(hh); + // } else if (strcmp(hh->GetName(), hh->GetTitle()) == 0) { + // histsWithBadTitles.insert(hh); + // } + // titleMatchName &= (TString(c->GetName()) == hh->GetTitle() || + // TString(hh->GetTitle()).BeginsWith(TString(c->GetName()) + "_")); + // std::shared_ptr nextHist(static_cast(hh->Clone())); + // hh->Add(prevHist.get(), -1.); + // hh->Scale(-1.); + // hhs.push_back(hh); + // prevHist = nextHist; + // } + // } else if (get()) { + // // need to create a histogram for each sample across all the channels - will rely on functionality + // below to + // // merge them based on titles + // + // for (auto &chan : bins()) { + // TString chanName(chan->GetName()); + // chanName = chanName(chanName.Index("=") + 1, chanName.Length()); + // auto samps = chan->mainChild(); + // if (!samps) + // samps = *chan; + // for (auto &samp : samps.components()) { + // auto hh = static_cast(h->Clone(samp->GetName())); + // hh->Reset(); + // hh->SetTitle(samp->GetTitle()); + // if (strlen(hh->GetTitle()) == 0) { + // hh->SetTitle(samp->GetName()); + // histsWithBadTitles.insert(hh); + // } else if (strcmp(hh->GetName(), hh->GetTitle()) == 0) { + // histsWithBadTitles.insert(hh); + // } + // hh->SetTitle(TString(hh->GetTitle()) + // .ReplaceAll(TString(chan->get()->GetName()) + "_", + // "")); // remove occurance of channelname_ in title (usually + // prefix) + // titleMatchName &= (TString(samp->GetName()) == hh->GetTitle() || + // TString(hh->GetTitle()).BeginsWith(TString(samp->GetName()) + "_")); + // hh->SetBinContent(hh->GetXaxis()->FindFixBin(chanName), samp->GetContent()); + // hhs.push_back(hh); + // } + // } + // } else { + // for (auto &samp : rarNode->components()) { + // auto hh = samp->BuildHistogram(v,false,false,1,0,"",false,false,0,h); // passing h to ensure + // binning is the same for all subcomponent hists if (sf) + // hh->Scale(sf->getVal()); + // hhs.push_back(hh); + // if (strlen(hh->GetTitle()) == 0) { + // hh->SetTitle(samp->GetName()); // ensure all hists has titles + // histsWithBadTitles.insert(hh); + // } else if (strcmp(hh->GetName(), hh->GetTitle()) == 0) { + // histsWithBadTitles.insert(hh); + // } + // titleMatchName &= (TString(samp->GetName()) == hh->GetTitle() || + // TString(hh->GetTitle()).BeginsWith(TString(samp->GetName()) + "_")); + // } + // } + // + // if (!hhs.empty()) { + // for (auto &hh : hhs) { + // allTitles.insert(hh->GetTitle()); + // } + // + // // get common prefix to strip off only if all titles match names and + // // any title is longer than 10 chars + // size_t e = std::min(allTitles.begin()->size(), allTitles.rbegin()->size()); + // size_t ii = 0; + // bool goodPrefix = false; + // std::string commonSuffix; + // if (titleMatchName && hhs.size() > 1) { + // while (ii < e - 1 && allTitles.begin()->at(ii) == allTitles.rbegin()->at(ii)) { 
+ // ii++; + // if (allTitles.begin()->at(ii) == '_' || allTitles.begin()->at(ii) == ' ') + // goodPrefix = true; + // } + // + // // find common suffix if there is one .. must start with a "_" + // bool stop = false; + // while (!stop && commonSuffix.size() < size_t(e - 1)) { + // commonSuffix = allTitles.begin()->substr(allTitles.begin()->length() - commonSuffix.length() - + // 1); for (auto &t : allTitles) { + // if (!TString(t).EndsWith(commonSuffix.c_str())) { + // commonSuffix = commonSuffix.substr(1); + // stop = true; + // break; + // } + // } + // } + // if (commonSuffix.find('_') == std::string::npos) { + // commonSuffix = ""; + // } else { + // commonSuffix = commonSuffix.substr(commonSuffix.find('_')); + // } + // } + // if (!goodPrefix) + // ii = 0; + // + // // also find how many characters are needed to distinguish all entries (that dont have the same + // name) + // // then carry on up to first space or underscore + // size_t jj = 0; + // std::map reducedTitles; + // while (reducedTitles.size() != allTitles.size()) { + // jj++; + // std::map titlesMap; + // for (auto &s : allTitles) { + // if (reducedTitles.count(s)) + // continue; + // titlesMap[s.substr(0, jj)]++; + // } + // for (auto &s : allTitles) { + // if (titlesMap[s.substr(0, jj)] == 1 && (jj >= s.length() || s.at(jj) == ' ' || s.at(jj) == + // '_')) { + // reducedTitles[s] = s.substr(0, jj); + // } + // } + // } + // + // // strip common prefix and suffix before adding + // for (auto ritr = hhs.rbegin(); ritr != hhs.rend(); ++ritr) { // go in reverse order + // if (!histsWithBadTitles.count((*ritr))) { + // continue; + // } + // auto _title = (hhs.size() > 5) ? reducedTitles[(*ritr)->GetTitle()] : (*ritr)->GetTitle(); + // _title = _title.substr(ii < _title.size() ? ii : 0); + // if (!commonSuffix.empty() && TString(_title).EndsWith(commonSuffix.c_str())) + // _title = _title.substr(0, _title.length() - commonSuffix.length()); + // (*ritr)->SetTitle(_title.c_str()); + // } + // } + // + // for (auto &hh : hhs) { + // // automatically group hists that all have the same title + // if (histGroups.find(hh->GetTitle()) == histGroups.end()) { + // histGroups[hh->GetTitle()] = hh; + // } else { + // // add it into this group + // histGroups[hh->GetTitle()]->Add(hh); + // delete hh; + // hh = nullptr; + // continue; + // } + // auto hhMin = (hh->GetMinimum() == 0) ? hh->GetMinimum(1e-9) : hh->GetMinimum(); + // if (!stack->GetHists() && h->GetMinimum() > hhMin) { + // auto newMin = hhMin - (h->GetMaximum() - hhMin) * gStyle->GetHistTopMargin(); + // if (hhMin >= 0 && newMin < 0) + // newMin = hhMin * 0.99; + // adjustYRange(newMin, h->GetMaximum()); + // } + // + // /*if(stack->GetHists() && stack->GetHists()->GetEntries()>0) { + // // to remove rounding effects on bin boundaries, see if binnings compatible + // auto _h1 = dynamic_cast(stack->GetHists()->At(0)); + // if(_h1->GetNbinsX()==hh->GetNbinsX()) TODO ... finish dealing with silly rounding effects + // }*/ + // TString thisOpt = dOpt; + // // uncomment next line to blend continuous with discrete components .. get some unpleasant "poke + // through" + // // effects though + // // if(auto s = samp->get(); s) thisOpt = + // s->isBinnedDistribution(*dynamic_cast(v)) ? + // // "" : "LF2"; + // stack->Add(hh, thisOpt); + // } + // stack->SetBit(kCanDelete); // should delete its sub histograms + // stack->Draw("noclear same"); + // h->Draw( + // dOpt + sOpt + + // "same"); // overlay again .. 
if stack would cover original hist (negative components) we still see + // integral + // h->Draw("axissame"); // redraws axis + // + // TList *ll = stack->GetHists(); + // if (ll && ll->GetEntries()) { + // + // // finally, ensure all hists are styled + // for (auto ho : *ll) { + // TH1 *hh = dynamic_cast(ho); + // if (!hh) + // continue; + // bool createdStyle = (xRooNode(*hh, *this).styles(nullptr, false).get() == nullptr); + // + // if (createdStyle) { + // // give hist a color, that isn't the same as any other hists color + // hh->SetFillStyle(1001); // solid fill style + // bool used = false; + // do { + // hh->SetFillColor((count++)); + // // check not already used this color + // used = false; + // for (auto ho2 : *ll) { + // TH1 *hh2 = dynamic_cast(ho2); + // if (!hh2) + // continue; + // auto _styleNode = xRooNode(*hh2, *this).styles(hh2, false); + // auto _style = _styleNode.get(); + // if (hh != hh2 && _style && _style->GetFillColor() == hh->GetFillColor()) { + // used = true; + // break; + // } + // } + // } while (used); + // } + // + // auto _styleNode = xRooNode(*hh, *this).styles(hh); + // if (auto _style = _styleNode.get()) { + // *dynamic_cast(hh) = *_style; + // *dynamic_cast(hh) = *_style; + // *dynamic_cast(hh) = *_style; + // } + // // for stacks, fill color of white should be color 10 unless fill style is 0 + // if (hh->GetFillColor() == kWhite && hh->GetFillStyle() != 0) { + // // kWhite means 'transparent' in ROOT ... should really use a FillStyle of 0 for that + // // so assume user wanted actual white, which is color 10 + // hh->SetFillColor(10); + // } + // addLegendEntry(hh, hh->GetTitle(), "f"); + // } + // } + // } } else if (!overlayExisted) { if (errHist) { addLegendEntry(errHist, strlen(errHist->GetTitle()) ? errHist->GetTitle() : GetName(), "fl"); } else { - addLegendEntry(h, strlen(h->GetTitle()) ? h->GetTitle() : GetName(), "l"); + addLegendEntry(h, strlen(h->GetTitle()) ? h->GetTitle() : GetName(), (hasError) ? "fl" : "l"); } } @@ -10566,6 +11260,8 @@ void xRooNode::Draw(Option_t *opt) ratioPad->SetRightMargin(gPad->GetRightMargin()); ratioPad->cd(); TH1 *ratioHist = dynamic_cast((errHist) ? errHist->Clone("auxHist") : h->Clone("auxHist")); + ratioHist->Reset(); + ratioHist->Add(h); // removes function list ratioHist->SetDirectory(nullptr); ratioHist->SetTitle((errHist) ? 
errHist->GetName() : h->GetName()); // abuse the title string to hold the name of the main hist @@ -10693,7 +11389,7 @@ void xRooNode::Draw(Option_t *opt) // now draw selected datasets on top if this was a pdf if (auto _pdf = get(); - !hasSame && _pdf /*&& (_pdf->canBeExtended() || robs().empty())*/ && coefs().empty()) { + !hasSame && _pdf /*&& (_pdf->canBeExtended() || robs().empty())*/ && coefs(true).empty()) { auto _dsets = datasets(); // bool _drawn=false; for (auto &d : _dsets) { @@ -10717,6 +11413,8 @@ void xRooNode::SaveAs(const char *filename, Option_t *option) const TString sOpt(option); sOpt.ToLower(); if (auto w = get(); w) { + // ensure the current color set is saved in the workspace + w->import(*gROOT->GetListOfColors(), true); if (TString(filename).EndsWith(".json")) { #if ROOT_VERSION_CODE >= ROOT_VERSION(6, 26, 00) @@ -10799,9 +11497,9 @@ void xRooNode::SaveAs(const char *filename, Option_t *option) const } } -double xRooNode::GetBinError(int bin, const xRooNode &fr) const +double xRooNode::GetBinError(int bin, const xRooNode &fr, int nToys, bool errorsHi, bool errorsLo) const { - auto res = GetBinErrors(bin, bin, fr); + auto res = GetBinErrors(bin, bin, fr, nToys, errorsHi, errorsLo); if (res.empty()) return std::numeric_limits::quiet_NaN(); return res.at(0); @@ -10943,7 +11641,8 @@ std::pair xRooNode::IntegralAndError(const xRooNode &fr, const c return std::make_pair(out, err); } -std::vector xRooNode::GetBinErrors(int binStart, int binEnd, const xRooNode &_fr) const +std::vector +xRooNode::GetBinErrors(int binStart, int binEnd, const xRooNode &_fr, int nToys, bool errorHi, bool errorLo) const { // note: so far this method is inconsistent with the BuildHistogram in ways: // no projection over other variables @@ -10955,128 +11654,145 @@ std::vector xRooNode::GetBinErrors(int binStart, int binEnd, const xRooN if (binStart != binEnd || !fParent) { throw std::runtime_error(TString::Format("%s is a bin - only has one value", GetName())); } - return fParent->GetBinErrors(fBinNumber, fBinNumber); + return fParent->GetBinErrors(fBinNumber, fBinNumber, _fr); } std::vector out; - auto o = dynamic_cast(get()); - if (!o) + auto _hist = BuildHistogram(nullptr, true, true, binStart, binEnd, _fr, errorHi, errorLo, nToys); + if (!_hist) return out; - - std::shared_ptr fr = std::dynamic_pointer_cast(_fr.fComp); - //= dynamic_cast( _fr.get() ? _fr->Clone() : fitResult()->Clone()); - - auto _coefs = coefs(); - - if (!fr) { - // need to ensure coefs, if any, are included in fit result retrieval so all pars are loaded - auto frn = (!_coefs.get() ? *this : xRooNode(RooProduct("tmp", "tmp", RooArgList(*o, *_coefs.get())))) - .fitResult(); - if (strlen(_fr.GetName())) - frn = frn.reduced(_fr.GetName()); - - // use name to reduce the fit result, if one given - fr = std::dynamic_pointer_cast(frn.fComp); - } - - if (!GETDMP(fr.get(), _finalPars)) { - fr->setFinalParList(RooArgList()); - } - - /// Oct2022: No longer doing this because want to allow fitResult to be used to get partial error - // // need to add any floating parameters not included somewhere already in the fit result ... 
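// Usage sketch for the widened error API above ("node" is a hypothetical xRooNode wrapping a pdf;
// the defaults of the new nToys/errorsHi/errorsLo arguments are not shown in this diff, and passing
// nToys=0 as the "no toys" value is a guess). As the new implementation notes below, the low error
// follows RooFit's convention of being returned with a negative sign:
//
//   auto fr = node.fitResult();                               // fit result used for error propagation
//   double hiErr = node.GetBinError(1, fr, 0, true, false);   // +1 sigma error on bin 1
//   double loErr = node.GetBinError(1, fr, 0, false, true);   // -1 sigma error, comes back negative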
- // RooArgList l; - // for(auto& p : pars()) { - // auto v = p->get(); - // if (!v) continue; - // if (v->isConstant()) continue; - // if (fr->floatParsFinal().find(v->GetName())) continue; - // if (fr->_constPars && fr->_constPars->find(v->GetName())) continue; - // l.add(*v); - // } - // - // if (!l.empty()) { - // RooArgList l2; l2.addClone(fr->floatParsFinal()); - // l2.addClone(l); - // fr->setFinalParList(l2); - // } - - TMatrixTSym *prevCov = static_cast *>(GETDMP(fr.get(), _VM)); - - if (!prevCov || size_t(prevCov->GetNcols()) < fr->floatParsFinal().size()) { - TMatrixDSym cov(fr->floatParsFinal().size()); - if (prevCov) { - for (int i = 0; i < prevCov->GetNcols(); i++) { - for (int j = 0; j < prevCov->GetNrows(); j++) { - cov(i, j) = (*prevCov)(i, j); - } - } - } - int i = 0; - for (auto &p : fr->floatParsFinal()) { - if (!prevCov || i >= prevCov->GetNcols()) { - cov(i, i) = pow(dynamic_cast(p)->getError(), 2); - } - i++; - } - int covQualBackup = fr->covQual(); - fr->setCovarianceMatrix(cov); - fr->setCovQual(covQualBackup); - } - - bool doBinWidth = false; - auto ax = (binStart == -1 && binEnd == -1) ? nullptr : GetXaxis(); - - auto _obs = obs(); // may own an obs so keep alive here - RooArgList normSet = _obs.argList(); - // to give consistency with BuildHistogram method, should be only the axis var if defined - if (ax) { - normSet.clear(); - normSet.add(*dynamic_cast(ax->GetParent())); - } - - if (auto p = dynamic_cast(o); ax && (p || _coefs.get() || o->getAttribute("density"))) { - // pdfs of samples embedded in a sumpdf (aka have a coef) will convert their density value to a content - doBinWidth = true; - } if (binEnd == 0) { - if (ax) { - binEnd = ax->GetNbins(); - } else { - binEnd = binStart; - } + binEnd = _hist->GetNbinsX(); + } else if (binEnd == binStart && binEnd == -1) { + binStart = 1; + binEnd = 1; // done an integral, so histogram has only 1 bin } for (int bin = binStart; bin <= binEnd; bin++) { - if (ax) - dynamic_cast(ax->GetParent())->setBin(bin - 1, ax->GetName()); - // if (!SetBin(bin)) { return out; } - - double res; - if (auto p = dynamic_cast(o); p) { - // fr->covarianceMatrix().Print(); - res = PdfWrapper(*p, _coefs.get(), !ax).getSimplePropagatedError(*fr, normSet); -#if ROOT_VERSION_CODE < ROOT_VERSION(6, 27, 00) - // improved normSet invalidity checking, so assuming no longer need this in 6.28 onwards - p->_normSet = nullptr; -#endif - } else { - // res = o->getPropagatedError(*fr, normSet); - // // TODO: What if coef has error? - probably need a FuncWrapper class - // if (auto c = _coefs.get(); c) { - // res *= c->getVal(normSet); - // } - res = RooProduct("errorEval", "errorEval", - RooArgList(*o, !_coefs.get() ? RooFit::RooConst(1) : *_coefs.get())) - .getPropagatedError(*fr, normSet); - } - if (doBinWidth) { - res *= ax->GetBinWidth(bin); - } - out.push_back(res); + out.push_back(((errorLo && !errorHi) ? (-1.) : 1.) * + _hist->GetBinError(bin)); // using same convention as RooFit that Lo errors are negative } - + delete _hist; return out; + + // auto o = dynamic_cast(get()); + // if (!o) + // return out; + // + // std::shared_ptr fr = std::dynamic_pointer_cast(_fr.fComp); + // //= dynamic_cast( _fr.get() ? _fr->Clone() : fitResult()->Clone()); + // + // auto _coefs = coefs(); + // + // if (!fr) { + // // need to ensure coefs, if any, are included in fit result retrieval so all pars are loaded + // auto frn = (!_coefs.get() ? 
*this : xRooNode(RooProduct("tmp", "tmp", RooArgList(*o, + // *_coefs.get())))) + // .fitResult(); + // if (strlen(_fr.GetName())) + // frn = frn.reduced(_fr.GetName()); + // + // // use name to reduce the fit result, if one given + // fr = std::dynamic_pointer_cast(frn.fComp); + // } + // + // if (!GETDMP(fr.get(), _finalPars)) { + // fr->setFinalParList(RooArgList()); + // } + // + // /// Oct2022: No longer doing this because want to allow fitResult to be used to get partial error + // // // need to add any floating parameters not included somewhere already in the fit result ... + // // RooArgList l; + // // for(auto& p : pars()) { + // // auto v = p->get(); + // // if (!v) continue; + // // if (v->isConstant()) continue; + // // if (fr->floatParsFinal().find(v->GetName())) continue; + // // if (fr->_constPars && fr->_constPars->find(v->GetName())) continue; + // // l.add(*v); + // // } + // // + // // if (!l.empty()) { + // // RooArgList l2; l2.addClone(fr->floatParsFinal()); + // // l2.addClone(l); + // // fr->setFinalParList(l2); + // // } + // + // TMatrixTSym *prevCov = static_cast *>(GETDMP(fr.get(), _VM)); + // + // if (!prevCov || size_t(prevCov->GetNcols()) < fr->floatParsFinal().size()) { + // TMatrixDSym cov(fr->floatParsFinal().size()); + // if (prevCov) { + // for (int i = 0; i < prevCov->GetNcols(); i++) { + // for (int j = 0; j < prevCov->GetNrows(); j++) { + // cov(i, j) = (*prevCov)(i, j); + // } + // } + // } + // int i = 0; + // for (auto &p : fr->floatParsFinal()) { + // if (!prevCov || i >= prevCov->GetNcols()) { + // cov(i, i) = pow(dynamic_cast(p)->getError(), 2); + // } + // i++; + // } + // int covQualBackup = fr->covQual(); + // fr->setCovarianceMatrix(cov); + // fr->setCovQual(covQualBackup); + // } + // + // bool doBinWidth = false; + // auto ax = (binStart == -1 && binEnd == -1) ? nullptr : GetXaxis(); + // + // auto _obs = obs(); // may own an obs so keep alive here + // RooArgList normSet = _obs.argList(); + // // to give consistency with BuildHistogram method, should be only the axis var if defined + // if (ax) { + // normSet.clear(); + // normSet.add(*dynamic_cast(ax->GetParent())); + // } + // + // if (auto p = dynamic_cast(o); ax && (p || _coefs.get() || o->getAttribute("density"))) { + // // pdfs of samples embedded in a sumpdf (aka have a coef) will convert their density value to a content + // doBinWidth = true; + // } + // if (binEnd == 0) { + // if (ax) { + // binEnd = ax->GetNbins(); + // } else { + // binEnd = binStart; + // } + // } + // for (int bin = binStart; bin <= binEnd; bin++) { + // if (ax) + // dynamic_cast(ax->GetParent())->setBin(bin - 1, ax->GetName()); + // // if (!SetBin(bin)) { return out; } + // + // double res; + // if (auto p = dynamic_cast(o); p) { + // // fr->covarianceMatrix().Print(); + // res = PdfWrapper(*p, _coefs.get(), !ax).getSimplePropagatedError(*fr, normSet); + // #if ROOT_VERSION_CODE < ROOT_VERSION(6, 27, 00) + // // improved normSet invalidity checking, so assuming no longer need this in 6.28 onwards + // p->_normSet = nullptr; + // #endif + // } else { + // // res = o->getPropagatedError(*fr, normSet); + // // // TODO: What if coef has error? - probably need a FuncWrapper class + // // if (auto c = _coefs.get(); c) { + // // res *= c->getVal(normSet); + // // } + // res = RooProduct("errorEval", "errorEval", + // RooArgList(*o, !_coefs.get() ? 
RooFit::RooConst(1) : *_coefs.get())) + // .getPropagatedError(*fr, normSet); + // } + // if (doBinWidth) { + // res *= ax->GetBinWidth(bin); + // } + // out.push_back(res); + // } + // + // return out; } std::string cling::printValue(const xRooNode *v) @@ -11094,7 +11810,7 @@ std::string cling::printValue(const xRooNode *v) out += "{"; out += n->GetName(); if (out.length() > 100 && left > 0) { - out += TString::Format(",... and %zu more", left); + out += TString::Format(",... and %lu more", left); break; } } diff --git a/roofit/xroofit/test/CMakeLists.txt b/roofit/xroofit/test/CMakeLists.txt new file mode 100644 index 0000000000000..5ce905ed3000f --- /dev/null +++ b/roofit/xroofit/test/CMakeLists.txt @@ -0,0 +1,7 @@ +# Copyright (C) 1995-2019, Rene Brun and Fons Rademakers. +# All rights reserved. +# +# For the licensing terms see $ROOTSYS/LICENSE. +# For the list of contributors see $ROOTSYS/README/CREDITS. + +ROOT_ADD_PYUNITTEST(xroofit_python xroofit_python.py) diff --git a/roofit/xroofit/test/xroofit_python.py b/roofit/xroofit/test/xroofit_python.py new file mode 100644 index 0000000000000..4c33fadea8ebd --- /dev/null +++ b/roofit/xroofit/test/xroofit_python.py @@ -0,0 +1,91 @@ +import unittest + +import ROOT + + +class XRooFitTests(unittest.TestCase): + + def test_oneChannelLimit(self): + """ + Tests creating a workspace (from histograms) containing pdf and dataset + for a single-channel, 3-bin model, with sig and bkg samples + and computing the CLs upper limit on the signal strength POI + """ + + # create some histograms to represent the samples, syst variations, and obsData + from array import array + + bkg = ROOT.TH1D("bkg", "Background", 3, 0, 3) + bkg.SetContent(array("d", [0, 15, 14, 13, 0])) # first and last are under/overflow + sig = ROOT.TH1D("sig", "Signal", 3, 0, 3) + sig.SetContent(array("d", [0, 4, 5, 6, 0])) + bkg_vary1 = bkg.Clone("alphaSyst=1") + bkg_vary1.SetContent(array("d", [0, 16, 13, 12, 0])) + obsData = ROOT.TH1D("obsData", "Data", 3, 0, 3) + obsData.SetContent(array("d", [0, 17, 15, 13, 0])) + bkg.SetFillColor(ROOT.kRed) # can e.g. add style settings to histograms, they will propagate into the workspace + + import ROOT.Experimental.XRooFit as XRF + + # create workspace + w = XRF.xRooNode("RooWorkspace", name="combined", title="combined") + # create a pdf and add a "SR" channel to it + pdf = w["pdfs"].Add("simPdf") + sr = pdf.Add("SR") + # add our samples to the channel + sr_bkg = sr.Add(bkg) + sr_sig = sr.Add(sig) + # add the variation on the bkg sample + sr_bkg.Vary(bkg_vary1) + # constrain the nuisance parameter that was created + pdf.pars()["alphaSyst"].Constrain("normal") # normal gaussian constraint + # create a signal strength POI and scale the sig term by it + w.poi().Add("mu[1]") + sr_sig.Multiply("mu") + # add the obsData to the channel + sr.datasets().Add(obsData) + + # example of accessing yields with propagated errors + w.poi()["mu"].setVal(0) + self.assertAlmostEqual(w["pdfs/simPdf/SR"].GetContent(), bkg.Integral()) + self.assertAlmostEqual(w["pdfs/simPdf/SR"].GetError(), 1) + + # could save the workspace as this point like this: + # w.SaveAs("ws_test_oneChannelLimit.root") + + # run a limit by creating a hypoSpace for the POI + hs = w.nll("obsData").hypoSpace("mu") + # when cls limit scan happens, fits are run. 
If we have an open writable TFile, all results will cache there + f = ROOT.TFile("fitCache.root", "RECREATE") + hs.scan("cls", nPoints=0, low=0, high=10) # nPoints=0 means will do an auto-scan for the limit + f.Close() + limits = dict(hs.limits()) # accesses the limits in the form of a dict + + for k, v in limits.items(): + print(k, "sigma expected limit =" if k != "obs" else "observed limit =", v.value(), "+/-", v.error()) + # do a basic check that all values and errors are valid + # and nan values are indicative of problems + assert not ROOT.TMath.IsNaN(v.value()) + assert not ROOT.TMath.IsNaN(v.error()) + + # example code for accessing the fit results of the fits that were run as part of the computation + def printInfo(hp, prefix=""): + print(prefix, "null mu=", hp.fNullVal(), "alt mu=", hp.fAltVal()) + fitResults = { + "ufit": hp.ufit(readOnly=True), # readOnly ensures wont try to do the fit if it wasn't needed + "cfit_null": hp.cfit_null(readOnly=True), # conditional fit with null mu value + "cfit_alt": hp.cfit_alt(readOnly=True), # conditional fit with alt mu value (usually mu=0) + "cfit_lbound": hp.cfit_lbound(readOnly=True), # sometimes necessary for lower-bound test statistics + "gfit": hp.gfit(), # the fit result from which the data was generated, if the data for this point is generated + } + for k, v in fitResults.items(): + print(prefix, " ", k, "status =", v.status() if v else "N/A") + + for hp in hs: # loop over the hypoPoints in the hypoSpace + printInfo(hp) + if hp.asimov(): + printInfo(hp.asimov(), " asimov") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 2c7a68887c4ea..57927c68f21d1 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -140,17 +140,20 @@ if(geom) FAILREGEX "FAILED|Error in") #--stressGeometry------------------------------------------------------------------------------------ - ROOT_EXECUTABLE(stressGeometry stressGeometry.cxx LIBRARIES Geom Tree GenVector Gpad) - ROOT_ADD_TEST(test-stressgeometry COMMAND stressGeometry -b FAILREGEX "FAILED|Error in" LABELS longtest) - ROOT_ADD_TEST(test-stressgeometry-interpreted COMMAND ${ROOT_root_CMD} -b -q -l ${CMAKE_CURRENT_SOURCE_DIR}/stressGeometry.cxx - FAILREGEX "FAILED|Error in" DEPENDS test-stressgeometry LABELS longtest) + if (NOT MSVC) # Prevents the access to the web, which could happen through https + ROOT_EXECUTABLE(stressGeometry stressGeometry.cxx LIBRARIES Geom Tree GenVector Gpad) + ROOT_ADD_TEST(test-stressgeometry COMMAND stressGeometry -b FAILREGEX "FAILED|Error in" LABELS longtest) + ROOT_ADD_TEST(test-stressgeometry-interpreted COMMAND ${ROOT_root_CMD} -b -q -l ${CMAKE_CURRENT_SOURCE_DIR}/stressGeometry.cxx + FAILREGEX "FAILED|Error in" DEPENDS test-stressgeometry LABELS longtest) + endif() endif() #--stressLinear------------------------------------------------------------------------------------ +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/linearIO.root ${CMAKE_CURRENT_BINARY_DIR} COPYONLY) ROOT_EXECUTABLE(stressLinear stressLinear.cxx LIBRARIES Matrix Hist RIO) ROOT_ADD_TEST(test-stresslinear COMMAND stressLinear FAILREGEX "FAILED|Error in" LABELS longtest) ROOT_ADD_TEST(test-stresslinear-interpreted COMMAND ${ROOT_root_CMD} -b -q -l ${CMAKE_CURRENT_SOURCE_DIR}/stressLinear.cxx - FAILREGEX "FAILED|Error in" DEPENDS test-stresslinear LABELS longtest) + FAILREGEX "FAILED|Error in" LABELS longtest) #--stressGraphics------------------------------------------------------------------------------------ if(ROOT_opengl_FOUND) @@ 
-175,14 +178,15 @@ if(ROOT_opengl_FOUND) COMMAND ${ROOT_root_CMD} -b -q -l ${CMAKE_CURRENT_SOURCE_DIR}/stressGraphics.cxx FAILREGEX "FAILED|Error in" DEPENDS test-stressgraphics) - if(CHROME_EXECUTABLE) - ROOT_ADD_TEST(test-stressgraphics-chrome - RUN_SERIAL - ENVIRONMENT LD_LIBRARY_PATH=${CMAKE_BINARY_DIR}/lib:$ENV{LD_LIBRARY_PATH} - COMMAND stressGraphics -b -k -p=sgc --web=chrome - FAILREGEX "FAILED|Error in" - LABELS longtest) - endif() + # Disabled until the failures on Fedora 41 are addressed + # if(CHROME_EXECUTABLE) + # ROOT_ADD_TEST(test-stressgraphics-chrome + # RUN_SERIAL + # ENVIRONMENT LD_LIBRARY_PATH=${CMAKE_BINARY_DIR}/lib:$ENV{LD_LIBRARY_PATH} + # COMMAND stressGraphics -b -k -p=sgc --web=chrome + # FAILREGEX "FAILED|Error in" + # LABELS longtest) + # endif() if(FIREFOX_EXECUTABLE AND NOT APPLE) ROOT_ADD_TEST(test-stressgraphics-firefox-skip3d RUN_SERIAL @@ -194,10 +198,11 @@ if(ROOT_opengl_FOUND) endif() #--stressHistogram------------------------------------------------------------------------------------ +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/stressHistogram.5.18.00.root ${CMAKE_CURRENT_BINARY_DIR} COPYONLY) ROOT_EXECUTABLE(stressHistogram stressHistogram.cxx LIBRARIES Hist RIO) ROOT_ADD_TEST(test-stresshistogram COMMAND stressHistogram FAILREGEX "FAILED|Error in" LABELS longtest) ROOT_ADD_TEST(test-stresshistogram-interpreted COMMAND ${ROOT_root_CMD} -b -q -l ${CMAKE_CURRENT_SOURCE_DIR}/stressHistogram.cxx - FAILREGEX "FAILED|Error in" DEPENDS test-stresshistogram ) + FAILREGEX "FAILED|Error in") #--stressGUI--------------------------------------------------------------------------------------- if(ROOT_asimage_FOUND) diff --git a/test/linearIO.root b/test/linearIO.root new file mode 100644 index 0000000000000..d6170f5e1fa39 Binary files /dev/null and b/test/linearIO.root differ diff --git a/test/stressHistogram.5.18.00.root b/test/stressHistogram.5.18.00.root new file mode 100644 index 0000000000000..4d6f2e314921c Binary files /dev/null and b/test/stressHistogram.5.18.00.root differ diff --git a/test/stressHistogram.cxx b/test/stressHistogram.cxx index 01d10d3052b25..8e153d92209d9 100644 --- a/test/stressHistogram.cxx +++ b/test/stressHistogram.cxx @@ -97,6 +97,12 @@ #include #include +#ifdef __CLING__ +const auto tmp_root_file_name = "tmp_hist_interpreted.root"; +#else +const auto tmp_root_file_name = "tmp_hist.root"; +#endif + using std::ostringstream, std::cout, std::endl, std::string; const unsigned int __DRAW__ = 0; @@ -132,7 +138,7 @@ enum RefFileEnum { const int refFileOption = 1; TFile * refFile = 0; -const char* refFileName = "http://root.cern/files/stressHistogram.5.18.00.root"; +const char* refFileName = "./stressHistogram.5.18.00.root"; TRandom2 r; // set to zero if want to run different numbers every time @@ -2956,11 +2962,11 @@ bool testWriteRead1D() h1->Fill(value, 1.0); } - TFile f("tmpHist.root", "RECREATE"); + TFile f(tmp_root_file_name, "RECREATE"); h1->Write(); f.Close(); - TFile f2("tmpHist.root"); + TFile f2(tmp_root_file_name); TH1D* h2 = static_cast ( f2.Get("wr1D_h1") ); bool ret = equals("Read/Write Hist 1D", h1, h2, cmpOptStats); @@ -2984,11 +2990,11 @@ bool testWriteReadVar1D() h1->Fill(value, 1.0); } - TFile f("tmpHist.root", "RECREATE"); + TFile f(tmp_root_file_name, "RECREATE"); h1->Write(); f.Close(); - TFile f2("tmpHist.root"); + TFile f2(tmp_root_file_name); TH1D* h2 = static_cast ( f2.Get("wr1D_h1") ); bool ret = equals("Read/Write VarH 1D", h1, h2, cmpOptStats); @@ -3008,11 +3014,11 @@ bool testWriteReadProfile1D() p1->Fill(x, y, 
1.0); } - TFile f("tmpHist.root", "RECREATE"); + TFile f(tmp_root_file_name, "RECREATE"); p1->Write(); f.Close(); - TFile f2("tmpHist.root"); + TFile f2(tmp_root_file_name); TProfile* p2 = static_cast ( f2.Get("wr1D_p1") ); bool ret = equals("Read/Write Prof 1D", p1, p2, cmpOptStats); @@ -3035,11 +3041,11 @@ bool testWriteReadProfileVar1D() p1->Fill(x, y, 1.0); } - TFile f("tmpHist.root", "RECREATE"); + TFile f(tmp_root_file_name, "RECREATE"); p1->Write(); f.Close(); - TFile f2("tmpHist.root"); + TFile f2(tmp_root_file_name); TProfile* p2 = static_cast ( f2.Get("wr1D_p1") ); bool ret = equals("Read/Write VarP 1D", p1, p2, cmpOptStats); @@ -3063,11 +3069,11 @@ bool testWriteRead2D() h1->Fill(x, y, 1.0); } - TFile f("tmpHist.root", "RECREATE"); + TFile f(tmp_root_file_name, "RECREATE"); h1->Write(); f.Close(); - TFile f2("tmpHist.root"); + TFile f2(tmp_root_file_name); TH2D* h2 = static_cast ( f2.Get("wr2D_h1") ); bool ret = equals("Read/Write Hist 2D", h1, h2, cmpOptStats); @@ -3090,11 +3096,11 @@ bool testWriteReadProfile2D() p1->Fill(x, y, z, 1.0); } - TFile f("tmpHist.root", "RECREATE"); + TFile f(tmp_root_file_name, "RECREATE"); p1->Write(); f.Close(); - TFile f2("tmpHist.root"); + TFile f2(tmp_root_file_name); TProfile2D* p2 = static_cast ( f2.Get("wr2D_p1") ); bool ret = equals("Read/Write Prof 2D", p1, p2, cmpOptStats); @@ -3120,11 +3126,11 @@ bool testWriteRead3D() h1->Fill(x, y, z, 1.0); } - TFile f("tmpHist.root", "RECREATE"); + TFile f(tmp_root_file_name, "RECREATE"); h1->Write(); f.Close(); - TFile f2("tmpHist.root"); + TFile f2(tmp_root_file_name); TH3D* h2 = static_cast ( f2.Get("wr3D_h1") ); bool ret = equals("Read/Write Hist 3D", h1, h2, cmpOptStats); @@ -3149,11 +3155,11 @@ bool testWriteReadProfile3D() p1->Fill(x, y, z, t, 1.0); } - TFile f("tmpHist.root", "RECREATE"); + TFile f(tmp_root_file_name, "RECREATE"); p1->Write(); f.Close(); - TFile f2("tmpHist.root"); + TFile f2(tmp_root_file_name); TProfile3D* p2 = static_cast ( f2.Get("wr3D_p1") ); // In this particular case the statistics are not checked. 
The @@ -3188,11 +3194,11 @@ bool testWriteReadHn() s1->Fill(points); } - TFile f("tmpHist.root", "RECREATE"); + TFile f(tmp_root_file_name, "RECREATE"); s1->Write(); f.Close(); - TFile f2("tmpHist.root"); + TFile f2(tmp_root_file_name); HIST* s2 = static_cast ( f2.Get("wrS-s1") ); bool ret = equals(TString::Format("Read/Write Hist %s", HIST::Class()->GetName()), s1, s2, cmpOptStats); diff --git a/test/stressLinear.cxx b/test/stressLinear.cxx index 5169f8bd7ed27..71846f53ac276 100644 --- a/test/stressLinear.cxx +++ b/test/stressLinear.cxx @@ -120,6 +120,16 @@ #include "TMatrixDEigen.h" #include "TMatrixDSymEigen.h" +#ifdef __CLING__ +const auto tmp_vmatrix_file_name = "stress-vmatrix_interpreted.root"; +const auto tmp_vvector_file_name = "stress-vvector_interpreted.root"; +const auto tmp_vdecomp_file_name ="stress-vdecomp_interpreted.root"; +#else +const auto tmp_vmatrix_file_name = "stress-vmatrix.root"; +const auto tmp_vvector_file_name = "stress-vvector.root"; +const auto tmp_vdecomp_file_name = "stress-vdecomp.root"; +#endif + void stressLinear (Int_t maxSizeReq=100,Int_t verbose=0); void StatusPrint (Int_t id,const TString &title,Bool_t status); @@ -2075,7 +2085,7 @@ void mstress_matrix_io() Bool_t ok = kTRUE; const Double_t pattern = TMath::Pi(); - TFile *f = new TFile("stress-vmatrix.root", "RECREATE"); + TFile *f = new TFile(tmp_vmatrix_file_name, "RECREATE"); Char_t name[80]; Int_t iloop = gNrLoop; @@ -2122,7 +2132,7 @@ void mstress_matrix_io() if (gVerbose) std::cout << "\nOpen database in read-only mode and read matrix" << std::endl; - TFile *f1 = new TFile("stress-vmatrix.root"); + TFile *f1 = new TFile(tmp_vmatrix_file_name); iloop = gNrLoop; while (iloop >= 0) { @@ -3110,7 +3120,7 @@ void spstress_matrix_io() Bool_t ok = kTRUE; const Double_t pattern = TMath::Pi(); - TFile *f = new TFile("stress-vmatrix.root", "RECREATE"); + TFile *f = new TFile(tmp_vmatrix_file_name, "RECREATE"); Char_t name[80]; Int_t iloop = gNrLoop; @@ -3143,7 +3153,7 @@ void spstress_matrix_io() if (gVerbose) std::cout << "\nOpen database in read-only mode and read matrix" << std::endl; - TFile *f1 = new TFile("stress-vmatrix.root"); + TFile *f1 = new TFile(tmp_vmatrix_file_name); iloop = gNrLoop; while (iloop >= 0) { @@ -3749,7 +3759,7 @@ void vstress_vector_io() Bool_t ok = kTRUE; const Double_t pattern = TMath::Pi(); - TFile *f = new TFile("stress-vvector.root","RECREATE"); + TFile *f = new TFile(tmp_vvector_file_name,"RECREATE"); Char_t name[80]; Int_t iloop = gNrLoop; @@ -3786,7 +3796,7 @@ void vstress_vector_io() if (gVerbose) std::cout << "\nOpen database in read-only mode and read vector" << std::endl; - TFile *f1 = new TFile("stress-vvector.root"); + TFile *f1 = new TFile(tmp_vvector_file_name); iloop = gNrLoop; while (iloop >= 0) { @@ -4275,7 +4285,7 @@ void astress_decomp_io(Int_t msize) if (gVerbose) std::cout << "\nWrite decomp m to database" << std::endl; - TFile *f = new TFile("stress-vdecomp.root", "RECREATE"); + TFile *f = new TFile(tmp_vdecomp_file_name, "RECREATE"); TDecompLU lu(m,1.0e-20); TDecompQRH qrh(m,1.0e-20); @@ -4295,7 +4305,7 @@ void astress_decomp_io(Int_t msize) if (gVerbose) std::cout << "\nOpen database in read-only mode and read matrix" << std::endl; - TFile *f1 = new TFile("stress-vdecomp.root"); + TFile *f1 = new TFile(tmp_vdecomp_file_name); if (gVerbose) std::cout << "\nRead decompositions should create same solutions" << std::endl; @@ -4379,8 +4389,7 @@ void astress_decomp_io(Int_t msize) void stress_backward_io() { - TFile::SetCacheFileDir("."); - TFile *f = 
TFile::Open("http://root.cern/files/linearIO.root","CACHEREAD"); + TFile *f = TFile::Open("./linearIO.root"); TMatrixF mf1 = THilbertMatrixF(-5,5,-5,5); mf1[1][2] = TMath::Pi(); @@ -4418,7 +4427,7 @@ void stress_backward_io() void cleanup() { - gSystem->Unlink("stress-vmatrix.root"); - gSystem->Unlink("stress-vvector.root"); - gSystem->Unlink("stress-vdecomp.root"); + gSystem->Unlink(tmp_vmatrix_file_name); + gSystem->Unlink(tmp_vvector_file_name); + gSystem->Unlink(tmp_vdecomp_file_name); } diff --git a/tmva/pymva/src/MethodPyAdaBoost.cxx b/tmva/pymva/src/MethodPyAdaBoost.cxx index 0af7bfc7dd021..0ad1533b6b8be 100644 --- a/tmva/pymva/src/MethodPyAdaBoost.cxx +++ b/tmva/pymva/src/MethodPyAdaBoost.cxx @@ -67,7 +67,7 @@ MethodPyAdaBoost::MethodPyAdaBoost(const TString &jobName, fBaseEstimator("None"), fNestimators(50), fLearningRate(1.0), - fAlgorithm("SAMME.R"), + fAlgorithm("SAMME"), fRandomState("None") { } @@ -79,7 +79,7 @@ MethodPyAdaBoost::MethodPyAdaBoost(DataSetInfo &theData, fBaseEstimator("None"), fNestimators(50), fLearningRate(1.0), - fAlgorithm("SAMME.R"), + fAlgorithm("SAMME"), fRandomState("None") { } @@ -116,12 +116,13 @@ void MethodPyAdaBoost::DeclareOptions() ``learning_rate``. There is a trade-off between ``learning_rate`` and\ ``n_estimators``."); - DeclareOptionRef(fAlgorithm, "Algorithm", "{'SAMME', 'SAMME.R'}, optional (default='SAMME.R')\ + DeclareOptionRef(fAlgorithm, "Algorithm", "{'SAMME', 'SAMME.R'}, optional (default='SAMME')\ If 'SAMME.R' then use the SAMME.R real boosting algorithm.\ ``base_estimator`` must support calculation of class probabilities.\ If 'SAMME' then use the SAMME discrete boosting algorithm.\ The SAMME.R algorithm typically converges faster than SAMME,\ - achieving a lower test error with fewer boosting iterations."); + achieving a lower test error with fewer boosting iterations.\ + 'SAME.R' is deprecated since version 1.4 and removed since 1.6"); DeclareOptionRef(fRandomState, "RandomState", "int, RandomState instance or None, optional (default=None)\ If int, random_state is the seed used by the random number generator;\ @@ -309,11 +310,11 @@ std::vector MethodPyAdaBoost::GetMvaValues(Long64_t firstEvt, Long64_t Py_DECREF(result); if (logProgress) { - Log() << kINFO + Log() << kINFO << "Elapsed time for evaluation of " << nEvents << " events: " << timer.GetElapsedTime() << " " << Endl; } - + return mvaValues; } diff --git a/tmva/sofie/inc/TMVA/ROperator_BasicUnary.hxx b/tmva/sofie/inc/TMVA/ROperator_BasicUnary.hxx index e573fea735c9b..f23686e92de69 100644 --- a/tmva/sofie/inc/TMVA/ROperator_BasicUnary.hxx +++ b/tmva/sofie/inc/TMVA/ROperator_BasicUnary.hxx @@ -87,6 +87,14 @@ public: out << SP << "}\n"; return out.str(); } + + std::vector GetStdLibs() override { + if (Op == EBasicUnaryOperator::kSqrt || Op == EBasicUnaryOperator::kExp || Op == EBasicUnaryOperator::kLog) { + return { std::string("cmath") }; + } else { + return {}; + } + } }; } // namespace SOFIE diff --git a/tmva/sofie/inc/TMVA/ROperator_Concat.hxx b/tmva/sofie/inc/TMVA/ROperator_Concat.hxx index b599c3cbe98a3..88963b32759c1 100644 --- a/tmva/sofie/inc/TMVA/ROperator_Concat.hxx +++ b/tmva/sofie/inc/TMVA/ROperator_Concat.hxx @@ -146,16 +146,6 @@ } fInputShapes.push_back(model.GetDynamicTensorShape(it)); } - // patch for concat for case {1,x} with {y} remove the 1 in first tensor - // if (fInputShapes.size() == 2) { - // if (fInputShapes[1].size() < fInputShapes[0].size()) { - // if (fInputShapes[0].front().dim == 1) { - // std::cout << "Patch concat and make first input with 
shape " << ConvertDynamicShapeToString(fInputShapes[0]) - // << " compatible with second input " << ConvertDynamicShapeToString(fInputShapes[1]) << "by removing firs dim" << std::endl; - // fInputShapes[0].erase(fInputShapes[0].begin()); - // } - // } - // } fOutputShape = ShapeInference(fInputShapes)[0]; if (model.Verbose()) std::cout << "Output of concat operator has shape " << ConvertDynamicShapeToString(fOutputShape) << std::endl; diff --git a/tmva/sofie/inc/TMVA/ROperator_LeakyRelu.hxx b/tmva/sofie/inc/TMVA/ROperator_LeakyRelu.hxx index b8c9ac19f8bd3..0ee858251bbeb 100644 --- a/tmva/sofie/inc/TMVA/ROperator_LeakyRelu.hxx +++ b/tmva/sofie/inc/TMVA/ROperator_LeakyRelu.hxx @@ -64,7 +64,7 @@ public: std::stringstream out; size_t length = ConvertShapeToLength(fShape); - out << SP << "float " << OpName << "_alpha = " << std::setprecision(std::numeric_limits::max_digits10) << falpha << ";\n"; + out << SP << "constexpr float " << OpName << "_alpha = " << std::setprecision(std::numeric_limits::max_digits10) << falpha << ";\n"; out << "\n//------ LEAKY RELU\n"; out << SP << "for (int id = 0; id < " << length << " ; id++){\n"; diff --git a/tmva/sofie/inc/TMVA/ROperator_Reduce.hxx b/tmva/sofie/inc/TMVA/ROperator_Reduce.hxx index e0af00e4de3a2..ee660f888ef43 100644 --- a/tmva/sofie/inc/TMVA/ROperator_Reduce.hxx +++ b/tmva/sofie/inc/TMVA/ROperator_Reduce.hxx @@ -16,7 +16,7 @@ namespace TMVA{ namespace Experimental{ namespace SOFIE{ -enum EReduceOpMode { ReduceMean, ReduceSum, ReduceSumsquare, ReduceProd, InvalidReduceOp }; +enum EReduceOpMode { ReduceMean, ReduceSum, ReduceSumSquare, ReduceProd, InvalidReduceOp }; template class ROperator_Reduce final : public ROperator @@ -38,7 +38,7 @@ public: std::string Name() { if (fReduceOpMode == ReduceMean) return "ReduceMean"; - else if (fReduceOpMode == ReduceSumsquare ) return "ReduceSumsquare"; + else if (fReduceOpMode == ReduceSumSquare ) return "ReduceSumSquare"; else if (fReduceOpMode == ReduceProd ) return "ReduceProd"; else if (fReduceOpMode == ReduceSum) return "ReduceSum"; return "Invalid"; @@ -112,8 +112,8 @@ public: } } - std::string Generate(std::string OpName){ - OpName = "op_" + OpName; + std::string Generate(std::string opName){ + opName = "op_" + opName; if (fShapeX.empty() || fShapeY.empty()) { throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first"); } @@ -134,40 +134,85 @@ public: // don't need to divide by last stride s[n-1] since it is 1 by definition std::stringstream out; - out << "\n//---- operator " << Name() << " " << OpName << "\n"; - // check where is reduced axes are last one. In this case we can do a faster implementation - bool reduceLastDims = true; + out << "\n//---- operator " << Name() << " " << opName << "\n"; + // check where is reduced axes are first or last one. 
In these case we can do a faster implementation + enum EReduceDim {kFirst, kLast, kMiddle}; + EReduceDim reduceDims = kLast; int kmin = fShapeX.size()-fAttrAxes.size(); for (int k = fShapeX.size()-1; k >= kmin; k--) { // if k is not a reduced axis is not last ones if (std::find(fAttrAxes.begin(), fAttrAxes.end(), k) == fAttrAxes.end()) { - reduceLastDims = false; + reduceDims = kMiddle; break; } } + if (reduceDims == kMiddle) { + reduceDims = kFirst; + // check if at the beginning + for (size_t k = 0; k < fAttrAxes.size(); k++) { + // if k is not a reduced axis is not first ones + if (std::find(fAttrAxes.begin(), fAttrAxes.end(), k) == fAttrAxes.end()) { + reduceDims = kMiddle; + break; + } + } + } size_t reducedLength = inputLength / outputLength; - if (reduceLastDims) { + if (reduceDims == kLast) { + //std::cout << "reduction for operator " << opName << " is last" << std::endl; // new faster implementation using a single loop + // faster to loop first on reduced dimension and then output + // reset output tensors + + // loop on output dimensions out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n"; + // loop on reduce dimensions std::string startingValue = (fReduceOpMode == ReduceProd) ? "1" : "0"; - out << SP << SP << ConvertTypeToString(GetTemplatedType(T())) << " reducedValue = " << startingValue << ";\n"; - // loop on reduced axis + out << SP << SP << "tensor_" << fNY << "[i] = " << startingValue << ";\n"; out << SP << SP << "for (size_t j = 0; j < " << reducedLength << "; j++) {\n"; + if (fReduceOpMode == ReduceProd) - out << SP << SP << SP << "reducedValue *= tensor_" << fNX << "[i * " << reducedLength << " + j];\n"; + out << SP << SP << SP << "tensor_" << fNY << "[i] *= tensor_" << fNX << "[i * " << reducedLength << " + j];\n"; else if (fReduceOpMode == ReduceSum || fReduceOpMode == ReduceMean) - out << SP << SP << SP << "reducedValue *= tensor_" << fNX << "[i * " << reducedLength << " + j];\n"; - else if(fReduceOpMode == ReduceSumsquare) - out << SP << SP << SP << "reducedValue *= tensor_" << fNX << "[i * " << reducedLength << " + j] * tensor_" + out << SP << SP << SP << "tensor_" << fNY << "[i] += tensor_" << fNX << "[i * " << reducedLength << " + j];\n"; + else if(fReduceOpMode == ReduceSumSquare) + out << SP << SP << SP << "tensor_" << fNY << "[i] += tensor_" << fNX << "[i * " << reducedLength << " + j] * tensor_" << fNX << "[i * " << reducedLength << " + j];\n"; out << SP << SP << "}\n"; // end j loop if(fReduceOpMode == ReduceMean) - out << SP << SP << "reducedValue /= static_cast(" << reducedLength << ");\n"; - out << SP << SP << "tensor_" << fNY << "[i] = reducedValue;\n"; + out << SP << SP << "tensor_" << fNY << "[i] /= static_cast(" << reducedLength << ");\n"; + out << SP << "}\n"; // end i loop - } else - { // standard case + } else if (reduceDims == kFirst) { + //std::cout << "reduction for operator " << opName << " is first" << std::endl; + // case reduction is at beginning + // reset output tensors + if (fReduceOpMode == ReduceProd) + out << SP << "fTensor_" << fNY << ".assign(" << outputLength << ",1);\n"; + else + out << SP << "fTensor_" << fNY << ".assign(" << outputLength << ",0);\n"; + + out << SP << "for (size_t i = 0; i < " << reducedLength << "; i++) {\n"; + out << SP << SP << "for (size_t j = 0; j < " << outputLength << "; j++) {\n"; + if (fReduceOpMode == ReduceProd) + out << SP << SP << SP << "tensor_" << fNY << "[j] *= tensor_" << fNX << "[i * " << outputLength << " + j];\n"; + else if (fReduceOpMode == ReduceSum || fReduceOpMode == 
ReduceMean) + out << SP << SP << SP << "tensor_" << fNY << "[j] += tensor_" << fNX << "[i * " << outputLength << " + j];\n"; + else if(fReduceOpMode == ReduceSumSquare) + out << SP << SP << SP << "tensor_" << fNY << "[j] += tensor_" << fNX << "[i * " << outputLength << " + j] * tensor_" + << fNX << "[i * " << outputLength << " + j];\n"; + out << SP << SP << "}\n"; // end j loop + out << SP << "}\n"; // end i loop + if(fReduceOpMode == ReduceMean) { + out << SP << "for (size_t j = 0; i < " << outputLength << "; j++) {\n"; + out << SP << SP << "tensor_" << fNY << "[j] /= static_cast(" << reducedLength << ");\n"; + out << SP << "}\n"; // end j loop + } + } + else + { // standard case + //std::cout << "reduction for operator " << opName << " is middle" << std::endl; // reset output tensors if (fReduceOpMode == ReduceProd) out << SP << "fTensor_" << fNY << ".assign(" << outputLength << ",1);\n"; @@ -193,7 +238,7 @@ public: out << SP << SP << "tensor_" << fNY << "[outputIndex] *= tensor_" << fNX << "[i];\n"; else if (fReduceOpMode == ReduceSum || fReduceOpMode == ReduceMean) out << SP << SP << "tensor_" << fNY << "[outputIndex] += tensor_" << fNX << "[i];\n"; - else if (fReduceOpMode == ReduceSumsquare) { + else if (fReduceOpMode == ReduceSumSquare) { out << SP << SP << "tensor_" << fNY << "[outputIndex] += tensor_" << fNX << "[i] * tensor_" << fNX << "[i];\n"; } diff --git a/tmva/sofie/inc/TMVA/ROperator_Tile.hxx b/tmva/sofie/inc/TMVA/ROperator_Tile.hxx index 869cd55d9a439..3686db1e0914a 100644 --- a/tmva/sofie/inc/TMVA/ROperator_Tile.hxx +++ b/tmva/sofie/inc/TMVA/ROperator_Tile.hxx @@ -51,12 +51,17 @@ public: } fShapeInput=model.GetTensorShape(fNInput); - // Retrieve the data pointer for the repeats tensor + // if repeats vector is not initialized we cannot deduce shape of output + // not support for time being this case + if (!model.IsInitializedTensor(fNRepeats)) { + throw std::runtime_error("TMVA SOFIE Tile Op: non-initialized repeats input is not supported"); + } + + // Retrieve the data pointer for the repeats tensor auto repptr = model.GetInitializedTensorData(fNRepeats); // Cast the raw pointer to the appropriate type (size_t*) - auto repeat_shape = static_cast(repptr.get()); - - if (repeat_shape == nullptr) { + auto repeats_data = static_cast(repptr.get()); + if (repeats_data == nullptr) { throw std::runtime_error("Failed to retrieve the data for the repeats tensor."); } // Get the shape of the repeats tensor to determine the number of elements @@ -66,12 +71,18 @@ public: throw std::runtime_error("Repeats tensor is not 1D."); } size_t num_elements = repeats_shape[0]; - // Convert the data to a vector - std::vector repeats_vector(repeat_shape, repeat_shape + num_elements); + // Convert the data to a vector of size_t + std::vector repeats_vector(num_elements); + std::copy(repeats_data, repeats_data + num_elements, repeats_vector.begin()); + fShapeY = ShapeInference({fShapeInput,repeats_vector})[0]; model.AddIntermediateTensor(fNY, model.GetTensorType(fNInput), fShapeY); + + if (model.Verbose()) + std::cout << "Tile: " << fNInput << " " << ConvertShapeToString(fShapeInput) << " -> " << fNY << " with shape " << ConvertShapeToString(fShapeY) + << " given repeats " << ConvertShapeToString(repeats_vector) << std::endl; } std::string Generate(std::string OpName){ @@ -89,17 +100,13 @@ public: std::string output = "tensor_" + fNY; out << "///-------- Tile operator\n"; out << "{\n"; // add scope to re-use same names - out << "std::vector input_shape = " << ConvertShapeToString(fShapeInput) << 
";\n"; - std::vector repeats = fShapeY; - for (size_t i = 0; i < repeats.size(); i++) - repeats[i] /= fShapeInput[i]; + out << "const int input_shape[" << fShapeInput.size() << "] = " << ConvertShapeToString(fShapeInput) << ";\n"; - out << "std::vector repeats = " << ConvertShapeToString(repeats) << ";\n"; out << "int inputLength = " << ConvertShapeToLength(fShapeInput) << ";\n"; out << "int s = 1;\n"; // loop from inverse dim order out << "for (int i = " << fShapeInput.size()-1 << "; i >=0; i--) {\n"; - out << SP << "int r = repeats[i];\n"; + out << SP << "int r = tensor_" << fNRepeats << "[i];\n"; // we cannot exclude case where repeats=1 since we need offset //out << SP << "if (r == 1 && i < " << fShapeInput.size()-1 << ") continue;\n"; out << SP << "int i_offset = 0, o_offset = 0;\n"; diff --git a/tmva/sofie/test/TestCustomModelsFromONNX.cxx b/tmva/sofie/test/TestCustomModelsFromONNX.cxx index 002f0e697e609..8be28073c5ce0 100644 --- a/tmva/sofie/test/TestCustomModelsFromONNX.cxx +++ b/tmva/sofie/test/TestCustomModelsFromONNX.cxx @@ -33,6 +33,11 @@ #include "ReduceProd_FromONNX.hxx" #include "input_models/references/ReduceProd.ref.hxx" +// hardcode reference +#include "ReduceSum_FromONNX.hxx" + +#include "ReduceSumSquare_FromONNX.hxx" + #include "Shape_FromONNX.hxx" #include "input_models/references/Shape.ref.hxx" @@ -1179,6 +1184,60 @@ TEST(ONNX, Pow_broadcast){ } +TEST(ONNX, ReduceSum){ + constexpr float TOLERANCE = DEFAULT_TOLERANCE; + + + // Preparing the standard input + std::vector input({ + 5, 2, 3, + 5, 5, 4 + }); + + // test Reduce sum in all axis and keeping the dimension + // input tensor is shape [1,2,3] + // output tensod is shape [1,1,1] and value = 24 (sum of all elements) + + TMVA_SOFIE_ReduceSum::Session s("ReduceSum_FromONNX.dat"); + std::vector output = s.infer(input.data()); + // Checking output size + EXPECT_EQ(output.size(), 1); + + float correct[] = {24}; + + // Checking every output value, one by one + for (size_t i = 0; i < output.size(); ++i) { + EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE); + } +} + +TEST(ONNX, ReduceSumSquare){ + constexpr float TOLERANCE = DEFAULT_TOLERANCE; + + + // Preparing the standard input + std::vector input({ + 5, 2, 3, + 5, 5, 4 + }); + + // reduce on last axis and do not keep dimension + // output should be [1,2] and [25+4+9, 25+25+16] + + + TMVA_SOFIE_ReduceSumSquare::Session s("ReduceSumSquare_FromONNX.dat"); + std::vector output = s.infer(input.data()); + // Checking output size + EXPECT_EQ(output.size(), 2); + + float correct[] = {38, 66}; + + // Checking every output value, one by one + for (size_t i = 0; i < output.size(); ++i) { + EXPECT_LE(std::abs(output[i] - correct[i]), TOLERANCE); + } +} + TEST(ONNX, Max) { constexpr float TOLERANCE = DEFAULT_TOLERANCE; diff --git a/tmva/sofie/test/input_models/ReduceSum.onnx b/tmva/sofie/test/input_models/ReduceSum.onnx new file mode 100644 index 0000000000000..c5ded0381b03e Binary files /dev/null and b/tmva/sofie/test/input_models/ReduceSum.onnx differ diff --git a/tmva/sofie/test/input_models/ReduceSumSquare.onnx b/tmva/sofie/test/input_models/ReduceSumSquare.onnx new file mode 100644 index 0000000000000..40ef9494f06c8 Binary files /dev/null and b/tmva/sofie/test/input_models/ReduceSumSquare.onnx differ diff --git a/tmva/sofie_parsers/src/ParseReduce.cxx b/tmva/sofie_parsers/src/ParseReduce.cxx index c9182176c1347..6c18a4371c342 100644 --- a/tmva/sofie_parsers/src/ParseReduce.cxx +++ b/tmva/sofie_parsers/src/ParseReduce.cxx @@ -16,8 +16,8 @@ std::unique_ptr 
ParseReduce(RModelParser_ONNX &parser, const onnx::No if (nodeproto.op_type() == "ReduceMean") op_mode = ReduceMean; - else if (nodeproto.op_type() == "ReduceSumsquare") - op_mode = ReduceSumsquare; + else if (nodeproto.op_type() == "ReduceSumSquare") + op_mode = ReduceSumSquare; else if (nodeproto.op_type() == "ReduceProd") op_mode = ReduceProd; else if (nodeproto.op_type() == "ReduceSum") @@ -77,9 +77,9 @@ ParserFuncSignature ParseReduceMean = [](RModelParser_ONNX &parser, const onnx:: return ParseReduce(parser, nodeproto); }; -// Parse ReduceSumsquare -ParserFuncSignature ParseReduceSumsquare = [](RModelParser_ONNX &parser, const onnx::NodeProto &nodeproto) { - return ParseReduce(parser, nodeproto); +// Parse ReduceSumSquare +ParserFuncSignature ParseReduceSumSquare = [](RModelParser_ONNX &parser, const onnx::NodeProto &nodeproto) { + return ParseReduce(parser, nodeproto); }; // Parse ReduceProd diff --git a/tmva/sofie_parsers/src/RModelParser_ONNX.cxx b/tmva/sofie_parsers/src/RModelParser_ONNX.cxx index d6d472426d769..1415e06e0f3b8 100644 --- a/tmva/sofie_parsers/src/RModelParser_ONNX.cxx +++ b/tmva/sofie_parsers/src/RModelParser_ONNX.cxx @@ -42,7 +42,7 @@ extern ParserFuncSignature ParseGreaterEq; // Reduce operators extern ParserFuncSignature ParseReduceMean; extern ParserFuncSignature ParseReduceSum; -extern ParserFuncSignature ParseReduceSumsquare; +extern ParserFuncSignature ParseReduceSumSquare; extern ParserFuncSignature ParseReduceProd; // Others extern ParserFuncSignature ParseBatchNormalization; @@ -131,9 +131,9 @@ std::shared_ptr GetInitializedTensorData(onnx::TensorProto * tensorproto, #ifdef R__BYTESWAP std::memcpy(data.get(), tensorproto->raw_data().c_str(), length * sizeof(T)); #else - for (std::size_t k = 0; k < fLength; ++k) - (reinterpret_cast(data.get()))[k] = - Rbswap_32((reinterpret_cast(tensorproto->raw_data().c_str()))[k]); + for (std::size_t k = 0; k < length; ++k) + (reinterpret_cast::value_type *>(data.get()))[k] = + RByteSwap::bswap((reinterpret_cast::value_type *>(tensorproto->raw_data().c_str()))[k]); #endif } else { ExtractDataFromTP::Copy(tensorproto, data.get()); @@ -170,7 +170,7 @@ RModelParser_ONNX::RModelParser_ONNX() noexcept : fOperatorsMapImpl(std::make_un // Reduce operators RegisterOperator("ReduceMean", ParseReduceMean); RegisterOperator("ReduceSum", ParseReduceSum); - RegisterOperator("ReduceSumsquare", ParseReduceSumsquare); + RegisterOperator("ReduceSumSquare", ParseReduceSumSquare); RegisterOperator("ReduceProd", ParseReduceProd); // Others RegisterOperator("BatchNormalization", ParseBatchNormalization); @@ -628,4 +628,4 @@ void RModelParser_ONNX::ParseONNXGraph(RModel & rmodel, const onnx::GraphProto & } // namespace SOFIE } // namespace Experimental -} // namespace TMVA \ No newline at end of file +} // namespace TMVA diff --git a/tmva/tmva/CMakeLists.txt b/tmva/tmva/CMakeLists.txt index c92c2edd92c85..0fcb91bceb239 100644 --- a/tmva/tmva/CMakeLists.txt +++ b/tmva/tmva/CMakeLists.txt @@ -458,9 +458,9 @@ ROOT_STANDARD_LIBRARY_PACKAGE(TMVAUtils TMVA/RInferenceUtils.hxx TMVA/RBDT.hxx TMVA/RSofieReader.hxx - TMVA/RBatchGenerator.hxx - TMVA/RBatchLoader.hxx - TMVA/RChunkLoader.hxx + TMVA/BatchGenerator/RBatchGenerator.hxx + TMVA/BatchGenerator/RBatchLoader.hxx + TMVA/BatchGenerator/RChunkLoader.hxx SOURCES diff --git a/tmva/tmva/inc/TMVA/BatchGenerator/RBatchGenerator.hxx b/tmva/tmva/inc/TMVA/BatchGenerator/RBatchGenerator.hxx new file mode 100644 index 0000000000000..d56419f5cf8d4 --- /dev/null +++ 
b/tmva/tmva/inc/TMVA/BatchGenerator/RBatchGenerator.hxx @@ -0,0 +1,336 @@ +// Author: Dante Niewenhuis, VU Amsterdam 07/2023 +// Author: Kristupas Pranckietis, Vilnius University 05/2024 +// Author: Nopphakorn Subsa-Ard, King Mongkut's University of Technology Thonburi (KMUTT) (TH) 08/2024 +// Author: Vincenzo Eduardo Padulano, CERN 10/2024 + +/************************************************************************* + * Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. * + * All rights reserved. * + * * + * For the licensing terms see $ROOTSYS/LICENSE. * + * For the list of contributors see $ROOTSYS/README/CREDITS. * + *************************************************************************/ + +#ifndef TMVA_RBATCHGENERATOR +#define TMVA_RBATCHGENERATOR + +#include "TMVA/RTensor.hxx" +#include "ROOT/RDF/RDatasetSpec.hxx" +#include "TMVA/BatchGenerator/RChunkLoader.hxx" +#include "TMVA/BatchGenerator/RBatchLoader.hxx" +#include "TROOT.h" + +#include +#include +#include +#include +#include +#include +#include + +namespace TMVA { +namespace Experimental { +namespace Internal { + +template +class RBatchGenerator { +private: + std::mt19937 fRng; + std::mt19937 fFixedRng; + std::random_device::result_type fFixedSeed; + + std::size_t fChunkSize; + std::size_t fMaxChunks; + std::size_t fBatchSize; + std::size_t fNumEntries; + + float fValidationSplit; + + std::variant>, std::shared_ptr>> fChunkLoader; + + std::unique_ptr fBatchLoader; + + std::unique_ptr fLoadingThread; + + std::unique_ptr> fChunkTensor; + + ROOT::RDF::RNode &f_rdf; + + std::mutex fIsActiveMutex; + + bool fDropRemainder; + bool fShuffle; + bool fIsActive{false}; // Whether the loading thread is active + bool fNotFiltered; + bool fUseWholeFile; + +public: + RBatchGenerator(ROOT::RDF::RNode &rdf, const std::size_t chunkSize, const std::size_t batchSize, + const std::vector &cols, const std::size_t numColumns, + const std::vector &vecSizes = {}, const float vecPadding = 0.0, + const float validationSplit = 0.0, const std::size_t maxChunks = 0, bool shuffle = true, + bool dropRemainder = true) + : fRng(std::random_device{}()), + fFixedSeed(std::uniform_int_distribution{}(fRng)), + f_rdf(rdf), + fChunkSize(chunkSize), + fBatchSize(batchSize), + fValidationSplit(validationSplit), + fMaxChunks(maxChunks), + fDropRemainder(dropRemainder), + fShuffle(shuffle), + fNotFiltered(f_rdf.GetFilterNames().empty()), + fUseWholeFile(maxChunks == 0) + { + + // Create tensor to load the chunk into + fChunkTensor = + std::make_unique>(std::vector{fChunkSize, numColumns}); + + if (fNotFiltered) { + fNumEntries = f_rdf.Count().GetValue(); + + fChunkLoader = std::make_unique>( + f_rdf, *fChunkTensor, fChunkSize, cols, vecSizes, vecPadding); + } else { + auto report = f_rdf.Report(); + fNumEntries = f_rdf.Count().GetValue(); + std::size_t numAllEntries = report.begin()->GetAll(); + + fChunkLoader = std::make_unique>( + f_rdf, *fChunkTensor, fChunkSize, cols, fNumEntries, numAllEntries, vecSizes, vecPadding); + } + + std::size_t maxBatches = ceil((fChunkSize / fBatchSize) * (1 - fValidationSplit)); + + // limits the number of batches that can be contained in the batchqueue based on the chunksize + fBatchLoader = std::make_unique(*fChunkTensor, fBatchSize, numColumns, + maxBatches); + } + + ~RBatchGenerator() { DeActivate(); } + + /// \brief De-activate the loading process by deactivating the batchgenerator + /// and joining the loading thread + void DeActivate() + { + { + std::lock_guard lock(fIsActiveMutex); + fIsActive = false; + } + + 
fBatchLoader->DeActivate(); + + if (fLoadingThread) { + if (fLoadingThread->joinable()) { + fLoadingThread->join(); + } + } + } + + /// \brief Activate the loading process by starting the batchloader, and + /// spawning the loading thread. + void Activate() + { + if (fIsActive) + return; + + { + std::lock_guard lock(fIsActiveMutex); + fIsActive = true; + } + + fFixedRng.seed(fFixedSeed); + fBatchLoader->Activate(); + // fLoadingThread = std::make_unique(&RBatchGenerator::LoadChunks, this); + if (fNotFiltered) { + fLoadingThread = std::make_unique(&RBatchGenerator::LoadChunksNoFilters, this); + } else { + fLoadingThread = std::make_unique(&RBatchGenerator::LoadChunksFilters, this); + } + } + + /// \brief Returns the next batch of training data if available. + /// Returns empty RTensor otherwise. + /// \return + const TMVA::Experimental::RTensor &GetTrainBatch() + { + // Get next batch if available + return fBatchLoader->GetTrainBatch(); + } + + /// \brief Returns the next batch of validation data if available. + /// Returns empty RTensor otherwise. + /// \return + const TMVA::Experimental::RTensor &GetValidationBatch() + { + // Get next batch if available + return fBatchLoader->GetValidationBatch(); + } + + std::size_t NumberOfTrainingBatches() + { + std::size_t entriesForTraining = + (fNumEntries / fChunkSize) * (fChunkSize - floor(fChunkSize * fValidationSplit)) + fNumEntries % fChunkSize - + floor(fValidationSplit * (fNumEntries % fChunkSize)); + + if (fDropRemainder || !(entriesForTraining % fBatchSize)) { + return entriesForTraining / fBatchSize; + } + + return entriesForTraining / fBatchSize + 1; + } + + /// @brief Return number of training remainder rows + /// @return + std::size_t TrainRemainderRows() + { + std::size_t entriesForTraining = + (fNumEntries / fChunkSize) * (fChunkSize - floor(fChunkSize * fValidationSplit)) + fNumEntries % fChunkSize - + floor(fValidationSplit * (fNumEntries % fChunkSize)); + + if (fDropRemainder || !(entriesForTraining % fBatchSize)) { + return 0; + } + + return entriesForTraining % fBatchSize; + } + + /// @brief Calculate number of validation batches and return it + /// @return + std::size_t NumberOfValidationBatches() + { + std::size_t entriesForValidation = (fNumEntries / fChunkSize) * floor(fChunkSize * fValidationSplit) + + floor((fNumEntries % fChunkSize) * fValidationSplit); + + if (fDropRemainder || !(entriesForValidation % fBatchSize)) { + + return entriesForValidation / fBatchSize; + } + + return entriesForValidation / fBatchSize + 1; + } + + /// @brief Return number of validation remainder rows + /// @return + std::size_t ValidationRemainderRows() + { + std::size_t entriesForValidation = (fNumEntries / fChunkSize) * floor(fChunkSize * fValidationSplit) + + floor((fNumEntries % fChunkSize) * fValidationSplit); + + if (fDropRemainder || !(entriesForValidation % fBatchSize)) { + + return 0; + } + + return entriesForValidation % fBatchSize; + } + + /// @brief Load chunks when no filters are applied on rdataframe + void LoadChunksNoFilters() + { + for (std::size_t currentChunk = 0, currentEntry = 0; + ((currentChunk < fMaxChunks) || fUseWholeFile) && currentEntry < fNumEntries; currentChunk++) { + + // stop the loop when the loading is not active anymore + { + std::lock_guard lock(fIsActiveMutex); + if (!fIsActive) + return; + } + + // A pair that consists the proccessed, and passed events while loading the chunk + std::size_t report = std::get>>(fChunkLoader)->LoadChunk(currentEntry); + currentEntry += report; + + CreateBatches(report); + } + + 
if (!fDropRemainder) { + fBatchLoader->LastBatches(); + } + + fBatchLoader->DeActivate(); + } + + void LoadChunksFilters() + { + std::size_t currentChunk = 0; + for (std::size_t processedEvents = 0, currentRow = 0; + ((currentChunk < fMaxChunks) || fUseWholeFile) && processedEvents < fNumEntries; currentChunk++) { + + // stop the loop when the loading is not active anymore + { + std::lock_guard lock(fIsActiveMutex); + if (!fIsActive) + return; + } + + // A pair that consists the proccessed, and passed events while loading the chunk + std::pair report = + std::get>>(fChunkLoader)->LoadChunk(currentRow); + + currentRow += report.first; + processedEvents += report.second; + + CreateBatches(report.second); + } + + if (currentChunk < fMaxChunks || fUseWholeFile) { + CreateBatches(std::get>>(fChunkLoader)->LastChunk()); + } + + if (!fDropRemainder) { + fBatchLoader->LastBatches(); + } + + fBatchLoader->DeActivate(); + } + + /// \brief Create batches + /// \param processedEvents + void CreateBatches(std::size_t processedEvents) + { + auto &&[trainingIndices, validationIndices] = createIndices(processedEvents); + + fBatchLoader->CreateTrainingBatches(trainingIndices); + fBatchLoader->CreateValidationBatches(validationIndices); + } + + /// \brief split the events of the current chunk into training and validation events, shuffle if needed + /// \param events + std::pair, std::vector> createIndices(std::size_t events) + { + // Create a vector of number 1..events + std::vector row_order = std::vector(events); + std::iota(row_order.begin(), row_order.end(), 0); + + if (fShuffle) { + // Shuffle the entry indices at every new epoch + std::shuffle(row_order.begin(), row_order.end(), fFixedRng); + } + + // calculate the number of events used for validation + std::size_t num_validation = floor(events * fValidationSplit); + + // Devide the vector into training and validation and return + std::vector trainingIndices = + std::vector({row_order.begin(), row_order.end() - num_validation}); + std::vector validationIndices = + std::vector({row_order.end() - num_validation, row_order.end()}); + + if (fShuffle) { + std::shuffle(trainingIndices.begin(), trainingIndices.end(), fRng); + } + + return std::make_pair(trainingIndices, validationIndices); + } + + bool IsActive() { return fIsActive; } +}; + +} // namespace Internal +} // namespace Experimental +} // namespace TMVA + +#endif // TMVA_RBATCHGENERATOR diff --git a/tmva/tmva/inc/TMVA/BatchGenerator/RBatchLoader.hxx b/tmva/tmva/inc/TMVA/BatchGenerator/RBatchLoader.hxx new file mode 100644 index 0000000000000..0da8eb4f38010 --- /dev/null +++ b/tmva/tmva/inc/TMVA/BatchGenerator/RBatchLoader.hxx @@ -0,0 +1,289 @@ +// Author: Dante Niewenhuis, VU Amsterdam 07/2023 +// Author: Kristupas Pranckietis, Vilnius University 05/2024 +// Author: Nopphakorn Subsa-Ard, King Mongkut's University of Technology Thonburi (KMUTT) (TH) 08/2024 +// Author: Vincenzo Eduardo Padulano, CERN 10/2024 + +/************************************************************************* + * Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. * + * All rights reserved. * + * * + * For the licensing terms see $ROOTSYS/LICENSE. * + * For the list of contributors see $ROOTSYS/README/CREDITS. 
* + *************************************************************************/ + +#ifndef TMVA_RBATCHLOADER +#define TMVA_RBATCHLOADER + +#include +#include +#include + +// Imports for threading +#include +#include +#include + +#include "TMVA/RTensor.hxx" +#include "TMVA/Tools.h" + +namespace TMVA { +namespace Experimental { +namespace Internal { + +class RBatchLoader { +private: + const TMVA::Experimental::RTensor &fChunkTensor; + std::size_t fBatchSize; + std::size_t fNumColumns; + std::size_t fMaxBatches; + std::size_t fTrainingRemainderRow = 0; + std::size_t fValidationRemainderRow = 0; + + bool fIsActive = false; + + std::mutex fBatchLock; + std::condition_variable fBatchCondition; + + std::queue>> fTrainingBatchQueue; + std::queue>> fValidationBatchQueue; + std::unique_ptr> fCurrentBatch; + + std::unique_ptr> fTrainingRemainder; + std::unique_ptr> fValidationRemainder; + +public: + RBatchLoader(const TMVA::Experimental::RTensor &chunkTensor, const std::size_t batchSize, + const std::size_t numColumns, const std::size_t maxBatches) + : fChunkTensor(chunkTensor), fBatchSize(batchSize), fNumColumns(numColumns), fMaxBatches(maxBatches) + { + // Create remainders tensors + fTrainingRemainder = + std::make_unique>(std::vector{fBatchSize - 1, fNumColumns}); + fValidationRemainder = + std::make_unique>(std::vector{fBatchSize - 1, fNumColumns}); + } + + ~RBatchLoader() { DeActivate(); } + +public: + /// \brief Return a batch of data as a unique pointer. + /// After the batch has been processed, it should be destroyed. + /// \return Training batch + const TMVA::Experimental::RTensor &GetTrainBatch() + { + std::unique_lock lock(fBatchLock); + fBatchCondition.wait(lock, [this]() { return !fTrainingBatchQueue.empty() || !fIsActive; }); + + if (fTrainingBatchQueue.empty()) { + fCurrentBatch = std::make_unique>(std::vector({0})); + return *fCurrentBatch; + } + + fCurrentBatch = std::move(fTrainingBatchQueue.front()); + fTrainingBatchQueue.pop(); + + fBatchCondition.notify_all(); + + return *fCurrentBatch; + } + + /// \brief Returns a batch of data for validation + /// The owner of this batch has to be with the RBatchLoader. + /// This is because the same validation batches should be used in all epochs. + /// \return Validation batch + const TMVA::Experimental::RTensor &GetValidationBatch() + { + if (fValidationBatchQueue.empty()) { + fCurrentBatch = std::make_unique>(std::vector({0})); + return *fCurrentBatch; + } + + fCurrentBatch = std::move(fValidationBatchQueue.front()); + fValidationBatchQueue.pop(); + + return *fCurrentBatch; + } + + /// \brief Activate the batchloader so it will accept chunks to batch + void Activate() + { + fTrainingRemainderRow = 0; + fValidationRemainderRow = 0; + + { + std::lock_guard lock(fBatchLock); + fIsActive = true; + } + fBatchCondition.notify_all(); + } + + /// \brief DeActivate the batchloader. This means that no more batches are created. 
+ /// Batches can still be returned if they are already loaded + void DeActivate() + { + { + std::lock_guard lock(fBatchLock); + fIsActive = false; + } + fBatchCondition.notify_all(); + } + + std::unique_ptr> + CreateBatch(const TMVA::Experimental::RTensor &chunkTensor, std::span idxs, + std::size_t batchSize) + { + auto batch = + std::make_unique>(std::vector({batchSize, fNumColumns})); + + for (std::size_t i = 0; i < batchSize; i++) { + std::copy(chunkTensor.GetData() + (idxs[i] * fNumColumns), + chunkTensor.GetData() + ((idxs[i] + 1) * fNumColumns), batch->GetData() + i * fNumColumns); + } + + return batch; + } + + std::unique_ptr> + CreateFirstBatch(const TMVA::Experimental::RTensor &remainderTensor, std::size_t remainderTensorRow, + std::span eventIndices) + { + auto batch = + std::make_unique>(std::vector({fBatchSize, fNumColumns})); + + for (size_t i = 0; i < remainderTensorRow; i++) { + std::copy(remainderTensor.GetData() + i * fNumColumns, remainderTensor.GetData() + (i + 1) * fNumColumns, + batch->GetData() + i * fNumColumns); + } + + for (std::size_t i = 0; i < (fBatchSize - remainderTensorRow); i++) { + std::copy(fChunkTensor.GetData() + eventIndices[i] * fNumColumns, + fChunkTensor.GetData() + (eventIndices[i] + 1) * fNumColumns, + batch->GetData() + (i + remainderTensorRow) * fNumColumns); + } + + return batch; + } + + /// @brief save to remaining data when the whole chunk has to be saved + /// @param chunkTensor + /// @param remainderTensor + /// @param remainderTensorRow + /// @param eventIndices + void SaveRemainingData(TMVA::Experimental::RTensor &remainderTensor, const std::size_t remainderTensorRow, + const std::vector eventIndices, const std::size_t start = 0) + { + for (std::size_t i = start; i < eventIndices.size(); i++) { + std::copy(fChunkTensor.GetData() + eventIndices[i] * fNumColumns, + fChunkTensor.GetData() + (eventIndices[i] + 1) * fNumColumns, + remainderTensor.GetData() + (i - start + remainderTensorRow) * fNumColumns); + } + } + + /// \brief Create training batches from the given chunk of data based on the given event indices + /// Batches are added to the training queue of batches + /// \param chunkTensor + /// \param eventIndices + void CreateTrainingBatches(const std::vector &eventIndices) + { + // Wait until less than a full chunk of batches are in the queue before splitting the next chunk into + // batches + { + std::unique_lock lock(fBatchLock); + fBatchCondition.wait(lock, [this]() { return (fTrainingBatchQueue.size() < fMaxBatches) || !fIsActive; }); + if (!fIsActive) + return; + } + + std::vector>> batches; + + if (eventIndices.size() + fTrainingRemainderRow >= fBatchSize) { + batches.emplace_back(CreateFirstBatch(*fTrainingRemainder, fTrainingRemainderRow, eventIndices)); + } else { + SaveRemainingData(*fTrainingRemainder, fTrainingRemainderRow, eventIndices); + fTrainingRemainderRow += eventIndices.size(); + return; + } + + // Create tasks of fBatchSize until all idx are used + std::size_t start = fBatchSize - fTrainingRemainderRow; + for (; (start + fBatchSize) <= eventIndices.size(); start += fBatchSize) { + // Grab the first fBatchSize indices + std::span idxs{eventIndices.data() + start, eventIndices.data() + start + fBatchSize}; + + // Fill a batch + batches.emplace_back(CreateBatch(fChunkTensor, idxs, fBatchSize)); + } + + { + std::unique_lock lock(fBatchLock); + for (std::size_t i = 0; i < batches.size(); i++) { + fTrainingBatchQueue.push(std::move(batches[i])); + } + } + + fBatchCondition.notify_all(); + + fTrainingRemainderRow = 
eventIndices.size() - start; + SaveRemainingData(*fTrainingRemainder, 0, eventIndices, start); + } + + /// \brief Create validation batches from the given chunk based on the given event indices + /// Batches are added to the vector of validation batches + /// \param chunkTensor + /// \param eventIndices + void CreateValidationBatches(const std::vector &eventIndices) + { + if (eventIndices.size() + fValidationRemainderRow >= fBatchSize) { + fValidationBatchQueue.push(CreateFirstBatch(*fValidationRemainder, fValidationRemainderRow, eventIndices)); + } else { + SaveRemainingData(*fValidationRemainder, fValidationRemainderRow, eventIndices); + fValidationRemainderRow += eventIndices.size(); + return; + } + + // Create tasks of fBatchSize untill all idx are used + std::size_t start = fBatchSize - fValidationRemainderRow; + for (; (start + fBatchSize) <= eventIndices.size(); start += fBatchSize) { + + std::vector idx; + + for (std::size_t i = start; i < (start + fBatchSize); i++) { + idx.push_back(eventIndices[i]); + } + + fValidationBatchQueue.push(CreateBatch(fChunkTensor, idx, fBatchSize)); + } + + fValidationRemainderRow = eventIndices.size() - start; + SaveRemainingData(*fValidationRemainder, 0, eventIndices, start); + } + + void LastBatches() + { + { + if (fTrainingRemainderRow) { + std::vector idx = std::vector(fTrainingRemainderRow); + std::iota(idx.begin(), idx.end(), 0); + + std::unique_ptr> batch = + CreateBatch(*fTrainingRemainder, idx, fTrainingRemainderRow); + + std::unique_lock lock(fBatchLock); + fTrainingBatchQueue.push(std::move(batch)); + } + } + + if (fValidationRemainderRow) { + std::vector idx = std::vector(fValidationRemainderRow); + std::iota(idx.begin(), idx.end(), 0); + + fValidationBatchQueue.push(CreateBatch(*fValidationRemainder, idx, fValidationRemainderRow)); + } + } +}; + +} // namespace Internal +} // namespace Experimental +} // namespace TMVA + +#endif // TMVA_RBATCHLOADER diff --git a/tmva/tmva/inc/TMVA/BatchGenerator/RChunkLoader.hxx b/tmva/tmva/inc/TMVA/BatchGenerator/RChunkLoader.hxx new file mode 100644 index 0000000000000..d06003bc7afc5 --- /dev/null +++ b/tmva/tmva/inc/TMVA/BatchGenerator/RChunkLoader.hxx @@ -0,0 +1,295 @@ +// Author: Dante Niewenhuis, VU Amsterdam 07/2023 +// Author: Kristupas Pranckietis, Vilnius University 05/2024 +// Author: Nopphakorn Subsa-Ard, King Mongkut's University of Technology Thonburi (KMUTT) (TH) 08/2024 +// Author: Vincenzo Eduardo Padulano, CERN 10/2024 + +/************************************************************************* + * Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. * + * All rights reserved. * + * * + * For the licensing terms see $ROOTSYS/LICENSE. * + * For the list of contributors see $ROOTSYS/README/CREDITS. * + *************************************************************************/ + +#ifndef TMVA_RCHUNKLOADER +#define TMVA_RCHUNKLOADER + +#include + +#include "TMVA/RTensor.hxx" +#include "ROOT/RDataFrame.hxx" +#include "ROOT/RDF/Utils.hxx" +#include "ROOT/RVec.hxx" + +#include "ROOT/RLogger.hxx" + +namespace TMVA { +namespace Experimental { +namespace Internal { + +// RChunkLoader class used to load content of a RDataFrame onto a RTensor. 
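The functor defined below flattens each event into one fixed-width row of the chunk tensor: scalar columns take one slot, while ROOT::RVec columns are truncated or padded to a configured maximum size. A minimal standalone sketch of that padding rule (illustrative only, with hypothetical names; the real functor writes directly into the RTensor buffer):

#include <algorithm>
#include <cstddef>
#include <vector>

// Copy `vec` into `row` at `offset`, using exactly `maxSize` slots:
// shorter vectors are padded with `padValue`, longer ones are truncated.
void fillPaddedColumn(std::vector<float> &row, std::size_t offset,
                      const std::vector<float> &vec, std::size_t maxSize, float padValue)
{
   const std::size_t n = std::min(vec.size(), maxSize);
   std::copy(vec.begin(), vec.begin() + n, row.begin() + offset);
   std::fill(row.begin() + offset + n, row.begin() + offset + maxSize, padValue);
}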
+template +class RChunkLoaderFunctor { + std::size_t fOffset{}; + std::size_t fVecSizeIdx{}; + float fVecPadding{}; + std::vector fMaxVecSizes{}; + + TMVA::Experimental::RTensor &fChunkTensor; + + template ::value, int> = 0> + void AssignToTensor(const T &vec) + { + const auto &max_vec_size = fMaxVecSizes[fVecSizeIdx++]; + const auto &vec_size = vec.size(); + if (vec_size < max_vec_size) // Padding vector column to max_vec_size with fVecPadding + { + std::copy(vec.cbegin(), vec.cend(), &fChunkTensor.GetData()[fOffset]); + std::fill(&fChunkTensor.GetData()[fOffset + vec_size], &fChunkTensor.GetData()[fOffset + max_vec_size], + fVecPadding); + } else // Copy only max_vec_size length from vector column + { + std::copy(vec.cbegin(), vec.cbegin() + max_vec_size, &fChunkTensor.GetData()[fOffset]); + } + fOffset += max_vec_size; + } + + template ::value, int> = 0> + void AssignToTensor(const T &val) + { + fChunkTensor.GetData()[fOffset++] = val; + } + +public: + RChunkLoaderFunctor(TMVA::Experimental::RTensor &chunkTensor, const std::vector &maxVecSizes, + float vecPadding) + : fChunkTensor(chunkTensor), fMaxVecSizes(maxVecSizes), fVecPadding(vecPadding) + { + } + + void operator()(const ColTypes &...cols) + { + fVecSizeIdx = 0; + (AssignToTensor(cols), ...); + } +}; + +template +class RChunkLoaderFunctorFilters { + +private: + std::size_t fOffset{}; + std::size_t fVecSizeIdx{}; + std::size_t fEntries{}; + std::size_t fChunkSize{}; + float fVecPadding{}; + std::vector fMaxVecSizes{}; + + TMVA::Experimental::RTensor &fChunkTensor; + TMVA::Experimental::RTensor &fRemainderTensor; + + template ::value, int> = 0> + void AssignToTensor(const T &vec) + { + std::size_t max_vec_size = fMaxVecSizes[fVecSizeIdx++]; + std::size_t vec_size = vec.size(); + if (vec_size < max_vec_size) // Padding vector column to max_vec_size with fVecPadding + { + std::copy(vec.begin(), vec.end(), &fChunkTensor.GetData()[fOffset]); + std::fill(&fChunkTensor.GetData()[fOffset + vec_size], &fChunkTensor.GetData()[fOffset + max_vec_size], + fVecPadding); + } else // Copy only max_vec_size length from vector column + { + std::copy(vec.begin(), vec.begin() + max_vec_size, &fChunkTensor.GetData()[fOffset]); + } + fOffset += max_vec_size; + fEntries++; + } + + template ::value, int> = 0> + void AssignToTensor(const T &val) + { + fChunkTensor.GetData()[fOffset++] = val; + fEntries++; + } + +public: + RChunkLoaderFunctorFilters(TMVA::Experimental::RTensor &chunkTensor, + TMVA::Experimental::RTensor &remainderTensor, std::size_t entries, + std::size_t chunkSize, std::size_t &&offset, + const std::vector &maxVecSizes = std::vector(), + const float vecPadding = 0.0) + : fChunkTensor(chunkTensor), + fRemainderTensor(remainderTensor), + fEntries(entries), + fChunkSize(chunkSize), + fOffset(offset), + fMaxVecSizes(maxVecSizes), + fVecPadding(vecPadding) + { + } + + void operator()(const ColTypes &...cols) + { + fVecSizeIdx = 0; + if (fEntries == fChunkSize) { + fChunkTensor = fRemainderTensor; + fOffset = 0; + } + (AssignToTensor(cols), ...); + } + + std::size_t &SetEntries() { return fEntries; } + std::size_t &SetOffset() { return fOffset; } +}; + +template +class RChunkLoader { + +private: + std::size_t fChunkSize; + + std::vector fCols; + + std::vector fVecSizes; + std::size_t fVecPadding; + + ROOT::RDF::RNode &f_rdf; + TMVA::Experimental::RTensor &fChunkTensor; + +public: + /// \brief Constructor for the RChunkLoader + /// \param rdf + /// \param chunkSize + /// \param cols + /// \param vecSizes + /// \param vecPadding + 
RChunkLoader(ROOT::RDF::RNode &rdf, TMVA::Experimental::RTensor &chunkTensor, const std::size_t chunkSize, + const std::vector &cols, const std::vector &vecSizes = {}, + const float vecPadding = 0.0) + : f_rdf(rdf), + fChunkTensor(chunkTensor), + fChunkSize(chunkSize), + fCols(cols), + fVecSizes(vecSizes), + fVecPadding(vecPadding) + { + } + + /// \brief Load a chunk of data using the RChunkLoaderFunctor + /// \param chunkTensor + /// \param currentRow + /// \return Number of processed events + std::size_t LoadChunk(const std::size_t currentRow) + { + RChunkLoaderFunctor func(fChunkTensor, fVecSizes, fVecPadding); + + ROOT::Internal::RDF::ChangeBeginAndEndEntries(f_rdf, currentRow, currentRow + fChunkSize); + auto myCount = f_rdf.Count(); + + // load data + f_rdf.Foreach(func, fCols); + + // get loading info + return myCount.GetValue(); + } +}; + +template +class RChunkLoaderFilters { + +private: + ROOT::RDF::RNode &f_rdf; + TMVA::Experimental::RTensor &fChunkTensor; + + std::size_t fChunkSize; + std::vector fCols; + const std::size_t fNumEntries; + std::size_t fNumAllEntries; + std::vector fVecSizes; + std::size_t fVecPadding; + std::size_t fNumColumns; + + const std::size_t fPartOfChunkSize; + TMVA::Experimental::RTensor fRemainderChunkTensor; + std::size_t fRemainderChunkTensorRow = 0; + +public: + /// \brief Constructor for the RChunkLoader + /// \param rdf + /// \param chunkSize + /// \param cols + /// \param filters + /// \param vecSizes + /// \param vecPadding + RChunkLoaderFilters(ROOT::RDF::RNode &rdf, TMVA::Experimental::RTensor &chunkTensor, + const std::size_t chunkSize, const std::vector &cols, std::size_t numEntries, + std::size_t numAllEntries, const std::vector &vecSizes = {}, + const float vecPadding = 0.0) + : f_rdf(rdf), + fChunkTensor(chunkTensor), + fChunkSize(chunkSize), + fCols(cols), + fNumEntries(numEntries), + fNumAllEntries(numAllEntries), + fVecSizes(vecSizes), + fVecPadding(vecPadding), + fNumColumns(cols.size()), + fPartOfChunkSize(chunkSize / 5), + fRemainderChunkTensor(std::vector{fPartOfChunkSize, fNumColumns}) + { + } + + /// \brief Load a chunk of data using the RChunkLoaderFunctor + /// \param chunkTensor + /// \param currentRow + /// \return A pair of size_t defining the number of events processed and how many passed all filters + std::pair LoadChunk(std::size_t currentRow) + { + for (std::size_t i = 0; i < fRemainderChunkTensorRow; i++) { + std::copy(fRemainderChunkTensor.GetData() + (i * fNumColumns), + fRemainderChunkTensor.GetData() + ((i + 1) * fNumColumns), + fChunkTensor.GetData() + (i * fNumColumns)); + } + + RChunkLoaderFunctorFilters func(fChunkTensor, fRemainderChunkTensor, fRemainderChunkTensorRow, + fChunkSize, fRemainderChunkTensorRow * fNumColumns, fVecSizes, + fVecPadding); + + std::size_t passedEvents = 0; + std::size_t processedEvents = 0; + + while ((passedEvents < fChunkSize && passedEvents < fNumEntries) && currentRow < fNumAllEntries) { + ROOT::Internal::RDF::ChangeBeginAndEndEntries(f_rdf, currentRow, currentRow + fPartOfChunkSize); + auto report = f_rdf.Report(); + + f_rdf.Foreach(func, fCols); + + processedEvents += report.begin()->GetAll(); + passedEvents += (report.end() - 1)->GetPass(); + + currentRow += fPartOfChunkSize; + func.SetEntries() = passedEvents; + func.SetOffset() = passedEvents * fNumColumns; + } + + fRemainderChunkTensorRow = passedEvents > fChunkSize ? 
passedEvents - fChunkSize : 0; + + return std::make_pair(processedEvents, passedEvents); + } + + std::size_t LastChunk() + { + for (std::size_t i = 0; i < fRemainderChunkTensorRow; i++) { + std::copy(fRemainderChunkTensor.GetData() + (i * fNumColumns), + fRemainderChunkTensor.GetData() + ((i + 1) * fNumColumns), + fChunkTensor.GetData() + (i * fNumColumns)); + } + + return fRemainderChunkTensorRow; + } +}; +} // namespace Internal +} // namespace Experimental +} // namespace TMVA +#endif // TMVA_RCHUNKLOADER diff --git a/tmva/tmva/inc/TMVA/RBatchGenerator.hxx b/tmva/tmva/inc/TMVA/RBatchGenerator.hxx deleted file mode 100644 index 500587db1fd6b..0000000000000 --- a/tmva/tmva/inc/TMVA/RBatchGenerator.hxx +++ /dev/null @@ -1,237 +0,0 @@ -#ifndef TMVA_BATCHGENERATOR -#define TMVA_BATCHGENERATOR - -#include -#include -#include -#include -#include -#include - -#include "TMVA/RTensor.hxx" -#include "ROOT/RDF/RDatasetSpec.hxx" -#include "TMVA/RChunkLoader.hxx" -#include "TMVA/RBatchLoader.hxx" -#include "TMVA/Tools.h" -#include "TRandom3.h" -#include "TROOT.h" - -namespace TMVA { -namespace Experimental { -namespace Internal { - -template -class RBatchGenerator { -private: - TMVA::RandomGenerator fRng = TMVA::RandomGenerator(0); - - std::string fFileName; - std::string fTreeName; - - std::vector fCols; - std::string fFilters; - - std::size_t fChunkSize; - std::size_t fMaxChunks; - std::size_t fBatchSize; - std::size_t fMaxBatches; - std::size_t fNumColumns; - std::size_t fNumEntries; - std::size_t fCurrentRow = 0; - - float fValidationSplit; - - std::unique_ptr> fChunkLoader; - std::unique_ptr fBatchLoader; - - std::unique_ptr fLoadingThread; - - bool fUseWholeFile = true; - - std::unique_ptr> fChunkTensor; - std::unique_ptr> fCurrentBatch; - - std::vector> fTrainingIdxs; - std::vector> fValidationIdxs; - - // filled batch elements - std::mutex fIsActiveLock; - - bool fShuffle = true; - bool fIsActive = false; - - std::vector fVecSizes; - float fVecPadding; - -public: - RBatchGenerator(const std::string &treeName, const std::string &fileName, const std::size_t chunkSize, - const std::size_t batchSize, const std::vector &cols, const std::string &filters = "", - const std::vector &vecSizes = {}, const float vecPadding = 0.0, - const float validationSplit = 0.0, const std::size_t maxChunks = 0, const std::size_t numColumns = 0, - bool shuffle = true) - : fTreeName(treeName), - fFileName(fileName), - fChunkSize(chunkSize), - fBatchSize(batchSize), - fCols(cols), - fFilters(filters), - fVecSizes(vecSizes), - fVecPadding(vecPadding), - fValidationSplit(validationSplit), - fMaxChunks(maxChunks), - fNumColumns((numColumns != 0) ? 
numColumns : cols.size()), - fShuffle(shuffle), - fUseWholeFile(maxChunks == 0) - { - // limits the number of batches that can be contained in the batchqueue based on the chunksize - fMaxBatches = ceil((fChunkSize / fBatchSize) * (1 - fValidationSplit)); - - // get the number of fNumEntries in the dataframe - std::unique_ptr f{TFile::Open(fFileName.c_str())}; - std::unique_ptr t{f->Get(fTreeName.c_str())}; - fNumEntries = t->GetEntries(); - - fChunkLoader = std::make_unique>( - fTreeName, fFileName, fChunkSize, fCols, fFilters, fVecSizes, fVecPadding); - fBatchLoader = std::make_unique(fBatchSize, fNumColumns, fMaxBatches); - - // Create tensor to load the chunk into - fChunkTensor = - std::make_unique>(std::vector{fChunkSize, fNumColumns}); - } - - ~RBatchGenerator() { DeActivate(); } - - /// \brief De-activate the loading process by deactivating the batchgenerator - /// and joining the loading thread - void DeActivate() - { - { - std::lock_guard lock(fIsActiveLock); - fIsActive = false; - } - - fBatchLoader->DeActivate(); - - if (fLoadingThread) { - if (fLoadingThread->joinable()) { - fLoadingThread->join(); - } - } - } - - /// \brief Activate the loading process by starting the batchloader, and - /// spawning the loading thread. - void Activate() - { - if (fIsActive) - return; - - { - std::lock_guard lock(fIsActiveLock); - fIsActive = true; - } - - fCurrentRow = 0; - fBatchLoader->Activate(); - fLoadingThread = std::make_unique(&RBatchGenerator::LoadChunks, this); - } - - /// \brief Returns the next batch of training data if available. - /// Returns empty RTensor otherwise. - /// \return - const TMVA::Experimental::RTensor &GetTrainBatch() - { - // Get next batch if available - return fBatchLoader->GetTrainBatch(); - } - - /// \brief Returns the next batch of validation data if available. - /// Returns empty RTensor otherwise. - /// \return - const TMVA::Experimental::RTensor &GetValidationBatch() - { - // Get next batch if available - return fBatchLoader->GetValidationBatch(); - } - - bool HasTrainData() { return fBatchLoader->HasTrainData(); } - - bool HasValidationData() { return fBatchLoader->HasValidationData(); } - - void LoadChunks() - { - for (std::size_t current_chunk = 0; ((current_chunk < fMaxChunks) || fUseWholeFile) && fCurrentRow < fNumEntries; - current_chunk++) { - - // stop the loop when the loading is not active anymore - { - std::lock_guard lock(fIsActiveLock); - if (!fIsActive) - return; - } - - // A pair that consists the proccessed, and passed events while loading the chunk - std::pair report = fChunkLoader->LoadChunk(*fChunkTensor, fCurrentRow); - fCurrentRow += report.first; - - CreateBatches(current_chunk, report.second); - - // Stop loading if the number of processed events is smaller than the desired chunk size - if (report.first < fChunkSize) { - break; - } - } - - fBatchLoader->DeActivate(); - } - - /// \brief Create batches for the current_chunk. 
- /// \param currentChunk - /// \param processedEvents - void CreateBatches(std::size_t currentChunk, std::size_t processedEvents) - { - - // Check if the indices in this chunk where already split in train and validations - if (fTrainingIdxs.size() > currentChunk) { - fBatchLoader->CreateTrainingBatches(*fChunkTensor, fTrainingIdxs[currentChunk], fShuffle); - } else { - // Create the Validation batches if this is not the first epoch - createIdxs(processedEvents); - fBatchLoader->CreateTrainingBatches(*fChunkTensor, fTrainingIdxs[currentChunk], fShuffle); - fBatchLoader->CreateValidationBatches(*fChunkTensor, fValidationIdxs[currentChunk]); - } - } - - /// \brief plit the events of the current chunk into validation and training events - /// \param processedEvents - void createIdxs(std::size_t processedEvents) - { - // Create a vector of number 1..processedEvents - std::vector row_order = std::vector(processedEvents); - std::iota(row_order.begin(), row_order.end(), 0); - - if (fShuffle) { - std::shuffle(row_order.begin(), row_order.end(), fRng); - } - - // calculate the number of events used for validation - std::size_t num_validation = ceil(processedEvents * fValidationSplit); - - // Devide the vector into training and validation - std::vector valid_idx({row_order.begin(), row_order.begin() + num_validation}); - std::vector train_idx({row_order.begin() + num_validation, row_order.end()}); - - fTrainingIdxs.push_back(train_idx); - fValidationIdxs.push_back(valid_idx); - } - - void StartValidation() { fBatchLoader->StartValidation(); } - bool IsActive() { return fIsActive; } -}; - -} // namespace Internal -} // namespace Experimental -} // namespace TMVA - -#endif // TMVA_BATCHGENERATOR diff --git a/tmva/tmva/inc/TMVA/RBatchLoader.hxx b/tmva/tmva/inc/TMVA/RBatchLoader.hxx deleted file mode 100644 index cb5ef8b4af676..0000000000000 --- a/tmva/tmva/inc/TMVA/RBatchLoader.hxx +++ /dev/null @@ -1,225 +0,0 @@ -#ifndef TMVA_RBatchLoader -#define TMVA_RBatchLoader - -#include -#include -#include - -// Imports for threading -#include -#include -#include - -#include "TMVA/RTensor.hxx" -#include "TMVA/Tools.h" -#include "TRandom3.h" - -namespace TMVA { -namespace Experimental { -namespace Internal { - -class RBatchLoader { -private: - std::size_t fBatchSize; - std::size_t fNumColumns; - std::size_t fMaxBatches; - - bool fIsActive = false; - TMVA::RandomGenerator fRng = TMVA::RandomGenerator(0); - - std::mutex fBatchLock; - std::condition_variable fBatchCondition; - - std::queue>> fTrainingBatchQueue; - std::vector>> fValidationBatches; - std::unique_ptr> fCurrentBatch; - - std::size_t fValidationIdx = 0; - - TMVA::Experimental::RTensor fEmptyTensor = TMVA::Experimental::RTensor({0}); - -public: - RBatchLoader(const std::size_t batchSize, const std::size_t numColumns, const std::size_t maxBatches) - : fBatchSize(batchSize), fNumColumns(numColumns), fMaxBatches(maxBatches) - { - } - - ~RBatchLoader() { DeActivate(); } - -public: - /// \brief Return a batch of data as a unique pointer. - /// After the batch has been processed, it should be distroyed. 
- /// \return Training batch - const TMVA::Experimental::RTensor &GetTrainBatch() - { - std::unique_lock lock(fBatchLock); - fBatchCondition.wait(lock, [this]() { return !fTrainingBatchQueue.empty() || !fIsActive; }); - - if (fTrainingBatchQueue.empty()) { - fCurrentBatch = std::make_unique>(std::vector({0})); - return *fCurrentBatch; - } - - fCurrentBatch = std::move(fTrainingBatchQueue.front()); - fTrainingBatchQueue.pop(); - - fBatchCondition.notify_all(); - - return *fCurrentBatch; - } - - /// \brief Returns a batch of data for validation - /// The owner of this batch has to be with the RBatchLoader. - /// This is because the same validation batches should be used in all epochs. - /// \return Validation batch - const TMVA::Experimental::RTensor &GetValidationBatch() - { - if (HasValidationData()) { - return *fValidationBatches[fValidationIdx++].get(); - } - - return fEmptyTensor; - } - - /// \brief Checks if there are more training batches available - /// \return - bool HasTrainData() - { - { - std::unique_lock lock(fBatchLock); - if (!fTrainingBatchQueue.empty() || fIsActive) - return true; - } - - return false; - } - - /// \brief Checks if there are more training batches available - /// \return - bool HasValidationData() - { - std::unique_lock lock(fBatchLock); - return fValidationIdx < fValidationBatches.size(); - } - - /// \brief Activate the batchloader so it will accept chunks to batch - void Activate() - { - { - std::lock_guard lock(fBatchLock); - fIsActive = true; - } - fBatchCondition.notify_all(); - } - - /// \brief DeActivate the batchloader. This means that no more batches are created. - /// Batches can still be returned if they are already loaded - void DeActivate() - { - { - std::lock_guard lock(fBatchLock); - fIsActive = false; - } - fBatchCondition.notify_all(); - } - - /// \brief Create a batch filled with the events on the given idx - /// \param chunkTensor - /// \param idx - /// \return - std::unique_ptr> - CreateBatch(const TMVA::Experimental::RTensor &chunkTensor, const std::vector idx) - { - auto batch = - std::make_unique>(std::vector({fBatchSize, fNumColumns})); - - for (std::size_t i = 0; i < fBatchSize; i++) { - std::copy(chunkTensor.GetData() + (idx[i] * fNumColumns), chunkTensor.GetData() + ((idx[i] + 1) * fNumColumns), - batch->GetData() + i * fNumColumns); - } - - return batch; - } - - /// \brief Create training batches from the given chunk of data based on the given event indices - /// Batches are added to the training queue of batches - /// The eventIndices can be shuffled to ensure random order for each epoch - /// \param chunkTensor - /// \param eventIndices - /// \param shuffle - void CreateTrainingBatches(const TMVA::Experimental::RTensor &chunkTensor, - std::vector eventIndices, const bool shuffle = true) - { - // Wait until less than a full chunk of batches are in the queue before loading splitting the next chunk into - // batches - { - std::unique_lock lock(fBatchLock); - fBatchCondition.wait(lock, [this]() { return (fTrainingBatchQueue.size() < fMaxBatches) || !fIsActive; }); - if (!fIsActive) - return; - } - - if (shuffle) - std::shuffle(eventIndices.begin(), eventIndices.end(), fRng); // Shuffle the order of idx - - std::vector>> batches; - - // Create tasks of fBatchSize untill all idx are used - for (std::size_t start = 0; (start + fBatchSize) <= eventIndices.size(); start += fBatchSize) { - - // Grab the first fBatchSize indices from the - std::vector idx; - for (std::size_t i = start; i < (start + fBatchSize); i++) { - 
idx.push_back(eventIndices[i]); - } - - // Fill a batch - batches.emplace_back(CreateBatch(chunkTensor, idx)); - } - - { - std::unique_lock lock(fBatchLock); - for (std::size_t i = 0; i < batches.size(); i++) { - fTrainingBatchQueue.push(std::move(batches[i])); - } - } - - fBatchCondition.notify_one(); - } - - /// \brief Create validation batches from the given chunk based on the given event indices - /// Batches are added to the vector of validation batches - /// \param chunkTensor - /// \param eventIndices - void CreateValidationBatches(const TMVA::Experimental::RTensor &chunkTensor, - const std::vector eventIndices) - { - // Create tasks of fBatchSize untill all idx are used - for (std::size_t start = 0; (start + fBatchSize) <= eventIndices.size(); start += fBatchSize) { - - std::vector idx; - - for (std::size_t i = start; i < (start + fBatchSize); i++) { - idx.push_back(eventIndices[i]); - } - - { - std::unique_lock lock(fBatchLock); - fValidationBatches.emplace_back(CreateBatch(chunkTensor, idx)); - } - } - } - - /// \brief Reset the validation process - void StartValidation() - { - std::unique_lock lock(fBatchLock); - fValidationIdx = 0; - } -}; - -} // namespace Internal -} // namespace Experimental -} // namespace TMVA - -#endif // TMVA_RBatchLoader diff --git a/tmva/tmva/inc/TMVA/RChunkLoader.hxx b/tmva/tmva/inc/TMVA/RChunkLoader.hxx deleted file mode 100644 index e74c9c76da5c3..0000000000000 --- a/tmva/tmva/inc/TMVA/RChunkLoader.hxx +++ /dev/null @@ -1,230 +0,0 @@ -#ifndef TMVA_CHUNKLOADER -#define TMVA_CHUNKLOADER - -#include -#include - -#include "TMVA/RTensor.hxx" -#include "ROOT/RDataFrame.hxx" -#include "ROOT/RVec.hxx" - -#include "ROOT/RLogger.hxx" - -namespace TMVA { -namespace Experimental { -namespace Internal { - -// RChunkLoader class used to load content of a RDataFrame onto a RTensor. -template -class RChunkLoaderFunctor { - -private: - std::size_t fOffset = 0; - std::size_t fVecSizeIdx = 0; - std::vector fMaxVecSizes; - - float fVecPadding; - - TMVA::Experimental::RTensor &fChunkTensor; - - /// \brief Load the final given value into fChunkTensor - /// \tparam First_T - /// \param first - template - void AssignToTensor(First_T first) - { - fChunkTensor.GetData()[fOffset++] = first; - } - - /// \brief Load the final given value into fChunkTensor - /// \tparam VecType - /// \param first - template - void AssignToTensor(const ROOT::RVec &first) - { - AssignVector(first); - } - - /// \brief Recursively loop through the given values, and load them onto the fChunkTensor - /// \tparam First_T - /// \tparam ...Rest_T - /// \param first - /// \param ...rest - template - void AssignToTensor(First_T first, Rest_T... rest) - { - fChunkTensor.GetData()[fOffset++] = first; - - AssignToTensor(std::forward(rest)...); - } - - /// \brief Recursively loop through the given values, and load them onto the fChunkTensor - /// \tparam VecType - /// \tparam ...Rest_T - /// \param first - /// \param ...rest - template - void AssignToTensor(const ROOT::RVec &first, Rest_T... rest) - { - AssignVector(first); - - AssignToTensor(std::forward(rest)...); - } - - /// \brief Loop through the values of a given vector and load them into the RTensor - /// Note: the given vec_size does not have to be the same size as the given vector - /// If the size is bigger than the given vector, zeros are used as padding. - /// If the size is smaller, the remaining values are ignored. 
- /// \tparam VecType - /// \param vec - template - void AssignVector(const ROOT::RVec &vec) - { - std::size_t max_vec_size = fMaxVecSizes[fVecSizeIdx++]; - std::size_t vec_size = vec.size(); - - for (std::size_t i = 0; i < max_vec_size; i++) { - if (i < vec_size) { - fChunkTensor.GetData()[fOffset++] = vec[i]; - } else { - fChunkTensor.GetData()[fOffset++] = fVecPadding; - } - } - } - -public: - RChunkLoaderFunctor(TMVA::Experimental::RTensor &chunkTensor, - const std::vector &maxVecSizes = std::vector(), - const float vecPadding = 0.0) - : fChunkTensor(chunkTensor), fMaxVecSizes(maxVecSizes), fVecPadding(vecPadding) - { - } - - /// \brief Loop through all columns of an event and put their values into an RTensor - /// \param first - /// \param ...rest - void operator()(First first, Rest... rest) - { - fVecSizeIdx = 0; - AssignToTensor(std::forward(first), std::forward(rest)...); - } -}; - -template -class RChunkLoader { - -private: - std::string fTreeName; - std::string fFileName; - std::size_t fChunkSize; - std::size_t fNumColumns; - - std::vector fCols; - std::string fFilters; - - std::vector fVecSizes; - std::size_t fVecPadding; - -public: - /// \brief Constructor for the RChunkLoader - /// \param treeName - /// \param fileName - /// \param chunkSize - /// \param cols - /// \param filters - /// \param vecSizes - /// \param vecPadding - RChunkLoader(const std::string &treeName, const std::string &fileName, const std::size_t chunkSize, - const std::vector &cols, const std::string &filters = "", - const std::vector &vecSizes = {}, const float vecPadding = 0.0) - : fTreeName(treeName), - fFileName(fileName), - fChunkSize(chunkSize), - fCols(cols), - fFilters(filters), - fVecSizes(vecSizes), - fVecPadding(vecPadding), - fNumColumns(cols.size()) - { - } - - /// \brief Load a chunk of data using the RChunkLoaderFunctor - /// \param chunkTensor - /// \param currentRow - /// \return A pair of size_t defining the number of events processed and how many passed all filters - std::pair - LoadChunk(TMVA::Experimental::RTensor &chunkTensor, const std::size_t currentRow) - { - RChunkLoaderFunctor func(chunkTensor, fVecSizes, fVecPadding); - - // Create TDataFrame of the chunk - // Use RDatasetSpec to start reading at the current row - long long start_l = currentRow; - ROOT::RDF::Experimental::RDatasetSpec x_spec = - ROOT::RDF::Experimental::RDatasetSpec() - .AddSample({"", fTreeName, fFileName}) - .WithGlobalRange({start_l, std::numeric_limits::max()}); - - ROOT::RDataFrame x_rdf(x_spec); - - // Load events if filters are given - if (fFilters.size() > 0) { - return loadFiltered(x_rdf, func); - } - - // load events if no filters are given - return loadNonFiltered(x_rdf, func); - } - -private: - /// \brief Add filters to the RDataFrame and load a chunk of data - /// \param x_rdf - /// \param func - /// \return A pair of size_t defining the number of events processed and how many passed all filters - std::pair loadFiltered(ROOT::RDataFrame &x_rdf, RChunkLoaderFunctor &func) - { - // Add the given filters to the RDataFrame - auto x_filter = x_rdf.Filter(fFilters, "RBatchGenerator_Filter"); - - // add range to the DataFrame - auto x_ranged = x_filter.Range(fChunkSize); - auto myReport = x_ranged.Report(); - - // load data - x_ranged.Foreach(func, fCols); - - // Use the report to gather the number of events processed and passed. - // passed_events is used to determine the starting event of the next chunk - // processed_events is used to determine if the end of the database is reached. 
- std::size_t processed_events = myReport.begin()->GetAll(); - std::size_t passed_events = (myReport.end() - 1)->GetPass(); - - return std::make_pair(processed_events, passed_events); - } - - /// \brief Loop over the events in the dataframe untill either the end of the dataframe - /// is reached, or a full chunk is loaded - /// \param x_rdf - /// \param func - /// \return A pair of size_t defining the number of events processed and how many passed all filters - std::pair loadNonFiltered(ROOT::RDataFrame &x_rdf, RChunkLoaderFunctor &func) - { - // add range - auto x_ranged = x_rdf.Range(fChunkSize); - // auto x_ranged = x_rdf.Range(currentRow, currentRow + fChunkSize); - auto myCount = x_ranged.Count(); - - // load data - x_ranged.Foreach(func, fCols); - - // get loading info - std::size_t processed_events = myCount.GetValue(); - std::size_t passed_events = myCount.GetValue(); - return std::make_pair(processed_events, passed_events); - } -}; - -} // namespace Internal -} // namespace Experimental -} // namespace TMVA -#endif // TMVA_CHUNKLOADER diff --git a/tmva/tmva/inc/TMVA/RTensor.hxx b/tmva/tmva/inc/TMVA/RTensor.hxx index ad152f219a98c..795cc7068966b 100644 --- a/tmva/tmva/inc/TMVA/RTensor.hxx +++ b/tmva/tmva/inc/TMVA/RTensor.hxx @@ -3,6 +3,7 @@ #include #include // std::size_t +#include #include // std::runtime_error #include // std::stringstream #include // std::shared_ptr diff --git a/tree/dataframe/inc/ROOT/RDF/RInterface.hxx b/tree/dataframe/inc/ROOT/RDF/RInterface.hxx index 2305fca5465b6..1d33e2e5f8c2a 100644 --- a/tree/dataframe/inc/ROOT/RDF/RInterface.hxx +++ b/tree/dataframe/inc/ROOT/RDF/RInterface.hxx @@ -89,6 +89,7 @@ namespace Internal { namespace RDF { class GraphCreatorHelper; void ChangeEmptyEntryRange(const ROOT::RDF::RNode &node, std::pair &&newRange); +void ChangeBeginAndEndEntries(const RNode &node, Long64_t begin, Long64_t end); void ChangeSpec(const ROOT::RDF::RNode &node, ROOT::RDF::Experimental::RDatasetSpec &&spec); void TriggerRun(ROOT::RDF::RNode node); std::string GetDataSourceLabel(const ROOT::RDF::RNode &node); @@ -123,6 +124,7 @@ class RInterface : public RInterfaceBase { friend void RDFInternal::TriggerRun(RNode node); friend void RDFInternal::ChangeEmptyEntryRange(const RNode &node, std::pair &&newRange); + friend void RDFInternal::ChangeBeginAndEndEntries(const RNode &node, Long64_t start, Long64_t end); friend void RDFInternal::ChangeSpec(const RNode &node, ROOT::RDF::Experimental::RDatasetSpec &&spec); friend std::string ROOT::Internal::RDF::GetDataSourceLabel(const RNode &node); std::shared_ptr fProxiedPtr; ///< Smart pointer to the graph node encapsulated by this RInterface. 
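For reference, a minimal sketch of how the internal ROOT::Internal::RDF::ChangeBeginAndEndEntries helper declared above is driven; it mirrors the unit test added to tree/dataframe/test/dataframe_cloning.cxx later in this patch. The tree and file names are placeholders and the helper sits in an internal namespace, so this is not user-facing API.

// Sketch only (placeholder names): process the same dataset in consecutive
// [begin, end) entry windows by retargeting the loop manager between runs.
#include <ROOT/RDataFrame.hxx>
#include <ROOT/RDF/RInterface.hxx> // ROOT::Internal::RDF::ChangeBeginAndEndEntries

void ProcessInWindows()
{
   ROOT::RDataFrame df{"events", "events.root"}; // hypothetical input file
   ROOT::Internal::RDF::ChangeBeginAndEndEntries(df, 0, 3);
   auto firstWindow = df.Take<ULong64_t>("x");
   auto firstValues = *firstWindow; // event loop runs here over entries [0, 3)
   ROOT::Internal::RDF::ChangeBeginAndEndEntries(df, 3, 7);
   // Subsequent windows reuse the already-built graph via CloneResultAndAction,
   // exactly as exercised by the ChangeBeginAndEndEntries test in this patch.
}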
diff --git a/tree/dataframe/inc/ROOT/RDF/RLoopManager.hxx b/tree/dataframe/inc/ROOT/RDF/RLoopManager.hxx index 74e40d4370e26..31967a245cde2 100644 --- a/tree/dataframe/inc/ROOT/RDF/RLoopManager.hxx +++ b/tree/dataframe/inc/ROOT/RDF/RLoopManager.hxx @@ -264,6 +264,7 @@ public: void AddSampleCallback(void *nodePtr, ROOT::RDF::SampleCallback_t &&callback); void SetEmptyEntryRange(std::pair &&newRange); + void ChangeBeginAndEndEntries(Long64_t begin, Long64_t end); void ChangeSpec(ROOT::RDF::Experimental::RDatasetSpec &&spec); ROOT::Internal::RDF::RStringCache &GetColumnNamesCache() { return fCachedColNames; } diff --git a/tree/dataframe/inc/ROOT/RDF/RMergeableValue.hxx b/tree/dataframe/inc/ROOT/RDF/RMergeableValue.hxx index 5971e6c76c11f..3c5aee1090b31 100644 --- a/tree/dataframe/inc/ROOT/RDF/RMergeableValue.hxx +++ b/tree/dataframe/inc/ROOT/RDF/RMergeableValue.hxx @@ -568,8 +568,8 @@ type-erased RMergeableValueBase objects. */ class RMergeableVariationsBase : public RMergeableValueBase { protected: - std::vector fKeys; - std::vector> fValues; + std::vector fKeys{}; + std::vector> fValues{}; public: /** @@ -591,6 +591,7 @@ public: { } RMergeableVariationsBase &operator=(RMergeableVariationsBase &&) = delete; + ~RMergeableVariationsBase() override = default; ///////////////////////////////////////////////////////////////////////////// /// \brief Constructor that initializes data members. @@ -660,6 +661,7 @@ public: RMergeableVariations &operator=(const RMergeableVariations &) = delete; RMergeableVariations(RMergeableVariations &&) = delete; RMergeableVariations &operator=(RMergeableVariations &&) = delete; + ~RMergeableVariations() final = default; ///////////////////////////////////////////////////////////////////////////// /// \brief Constructor that initializes data members. 
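The RMergeableValue hierarchy touched above is handed around as std::unique_ptr to the base class and downcast once the concrete type is known, which is the pattern the RResultMap.hxx hunk below relies on. A generic, ROOT-independent reminder of that pattern (the type names here are illustrative only):

// Illustrative only: transfer ownership from unique_ptr<Base> to
// unique_ptr<Derived> when the dynamic type is known to the caller.
#include <memory>

struct Base {
   virtual ~Base() = default; // polymorphic base with a defaulted virtual dtor
};
struct Derived : Base {
   int fPayload = 42;
};

std::unique_ptr<Derived> DowncastOwning(std::unique_ptr<Base> base)
{
   // release() first so that exactly one smart pointer ever owns the object;
   // the static_cast is valid only because the dynamic type is guaranteed.
   return std::unique_ptr<Derived>{static_cast<Derived *>(base.release())};
}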
diff --git a/tree/dataframe/inc/ROOT/RDF/RResultMap.hxx b/tree/dataframe/inc/ROOT/RDF/RResultMap.hxx index ec7f61b63baa1..163e8823fbc85 100644 --- a/tree/dataframe/inc/ROOT/RDF/RResultMap.hxx +++ b/tree/dataframe/inc/ROOT/RDF/RResultMap.hxx @@ -211,17 +211,16 @@ template std::unique_ptr> GetMergeableValue(ROOT::RDF::Experimental::RResultMap &rmap) { rmap.RunEventLoopIfNeeded(); - - std::unique_ptr mVariationsBase; if (rmap.fVariedAction != nullptr) { - auto mValueBase = rmap.fVariedAction->GetMergeableValue(); - mVariationsBase.reset(static_cast(mValueBase.release())); // downcast unique_ptr + std::unique_ptr mVariationsBase{ + static_cast(rmap.fVariedAction->GetMergeableValue().release())}; + mVariationsBase->AddNominal(rmap.fNominalAction->GetMergeableValue()); + return std::make_unique>(std::move(*mVariationsBase)); } else { - mVariationsBase = std::unique_ptr({}, {}); + auto ret = std::make_unique>(); + ret->AddNominal(rmap.fNominalAction->GetMergeableValue()); + return ret; } - mVariationsBase->AddNominal(rmap.fNominalAction->GetMergeableValue()); - - return std::make_unique>(std::move(*mVariationsBase)); } } // namespace RDF } // namespace Detail diff --git a/tree/dataframe/src/RInterface.cxx b/tree/dataframe/src/RInterface.cxx index d0ae267418b2f..45e821fb4548f 100644 --- a/tree/dataframe/src/RInterface.cxx +++ b/tree/dataframe/src/RInterface.cxx @@ -17,6 +17,12 @@ void ROOT::Internal::RDF::ChangeEmptyEntryRange(const ROOT::RDF::RNode &node, node.GetLoopManager()->SetEmptyEntryRange(std::move(newRange)); } +void ROOT::Internal::RDF::ChangeBeginAndEndEntries(const ROOT::RDF::RNode &node, Long64_t begin, Long64_t end) +{ + R__ASSERT(end >= begin && "end is less than begin in the passed entry range!"); + node.GetLoopManager()->ChangeBeginAndEndEntries(begin, end); +} + /** * \brief Changes the input dataset specification of an RDataFrame. * diff --git a/tree/dataframe/src/RLoopManager.cxx b/tree/dataframe/src/RLoopManager.cxx index a0a049ecd08a9..ba16065253714 100644 --- a/tree/dataframe/src/RLoopManager.cxx +++ b/tree/dataframe/src/RLoopManager.cxx @@ -42,6 +42,25 @@ #include "ROOT/RNTupleDS.hxx" #endif +#ifdef R__UNIX +// Functions needed to perform EOS XRootD redirection in ChangeSpec +#include +#include "TEnv.h" +#include "TSystem.h" +#ifndef R__FBSD +#include +#else +#include +#endif +#ifdef R__MACOSX +/* On macOS getxattr takes two extra arguments that should be set to 0 */ +#define getxattr(path, name, value, size) getxattr(path, name, value, size, 0u, 0) +#endif +#ifdef R__FBSD +#define getxattr(path, name, value, size) extattr_get_file(path, EXTATTR_NAMESPACE_USER, name, value, size) +#endif +#endif + #include #include #include @@ -403,6 +422,38 @@ RLoopManager::RLoopManager(ROOT::RDF::Experimental::RDatasetSpec &&spec) ChangeSpec(std::move(spec)); } +#ifdef R__UNIX +namespace { +std::optional GetRedirectedSampleId(std::string_view path, std::string_view datasetName) +{ + // Mimick the redirection done in TFile::Open to see if the path points to a FUSE-mounted EOS path. + // If so, we create a redirected sample ID with the full xroot URL. 
+ TString expandedUrl(path.data()); + gSystem->ExpandPathName(expandedUrl); + if (gEnv->GetValue("TFile.CrossProtocolRedirects", 1) == 1) { + TUrl fileurl(expandedUrl, /* default is file */ kTRUE); + if (strcmp(fileurl.GetProtocol(), "file") == 0) { + ssize_t len = getxattr(fileurl.GetFile(), "eos.url.xroot", nullptr, 0); + if (len > 0) { + std::string xurl(len, 0); + std::string fileNameFromUrl{fileurl.GetFile()}; + if (getxattr(fileNameFromUrl.c_str(), "eos.url.xroot", &xurl[0], len) == len) { + // Sometimes the `getxattr` call may return an invalid URL due + // to the POSIX attribute not being yet completely filled by EOS. + if (auto baseName = fileNameFromUrl.substr(fileNameFromUrl.find_last_of("/") + 1); + std::equal(baseName.crbegin(), baseName.crend(), xurl.crbegin())) { + return xurl + '/' + datasetName.data(); + } + } + } + } + } + + return std::nullopt; +} +} // namespace +#endif + /** * @brief Changes the internal TTree held by the RLoopManager. * @@ -441,6 +492,11 @@ void RLoopManager::ChangeSpec(ROOT::RDF::Experimental::RDatasetSpec &&spec) // is exposed to users via RSampleInfo and DefinePerSample). const auto sampleId = files[i] + '/' + trees[i]; fSampleMap.insert({sampleId, &sample}); +#ifdef R__UNIX + // Also add redirected EOS xroot URL when available + if (auto redirectedSampleId = GetRedirectedSampleId(files[i], trees[i])) + fSampleMap.insert({redirectedSampleId.value(), &sample}); +#endif } } SetTree(std::move(chain)); @@ -1160,6 +1216,12 @@ void RLoopManager::SetEmptyEntryRange(std::pair &&newRange fEmptyEntryRange = std::move(newRange); } +void RLoopManager::ChangeBeginAndEndEntries(Long64_t begin, Long64_t end) +{ + fBeginEntry = begin; + fEndEntry = end; +} + /** * \brief Helper function to open a file (or the first file from a glob). * This function is used at construction time of an RDataFrame, to check the @@ -1167,15 +1229,35 @@ void RLoopManager::SetEmptyEntryRange(std::pair &&newRange */ std::unique_ptr OpenFileWithSanityChecks(std::string_view fileNameGlob) { - bool fileIsGlob = [&fileNameGlob]() { - const std::vector wildcards = {"[", "]", "*", "?"}; // Wildcards accepted by TChain::Add - return std::any_of(wildcards.begin(), wildcards.end(), - [&fileNameGlob](const auto &wc) { return fileNameGlob.find(wc) != std::string_view::npos; }); + // Follow same logic in TChain::Add to find the correct string to look for globbing: + // - If the extension ".root" is present in the file name, pass along the basename. + // - If not, use the "?" token to delimit the part of the string which represents the basename. + // - Otherwise, pass the full filename. 
+ auto &&baseNameAndQuery = [&fileNameGlob]() { + constexpr std::string_view delim{".root"}; + if (auto &&it = std::find_end(fileNameGlob.begin(), fileNameGlob.end(), delim.begin(), delim.end()); + it != fileNameGlob.end()) { + auto &&distanceToEndOfDelim = std::distance(fileNameGlob.begin(), it + delim.length()); + return std::make_pair(fileNameGlob.substr(0, distanceToEndOfDelim), fileNameGlob.substr(distanceToEndOfDelim)); + } else if (auto &&lastQuestionMark = fileNameGlob.find_last_of('?'); lastQuestionMark != std::string_view::npos) + return std::make_pair(fileNameGlob.substr(0, lastQuestionMark), fileNameGlob.substr(lastQuestionMark)); + else + return std::make_pair(fileNameGlob, std::string_view{}); + }(); + // Captured structured bindings variable are only valid since C++20 + auto &&baseName = baseNameAndQuery.first; + auto &&query = baseNameAndQuery.second; + + const auto nameHasWildcard = [&baseName]() { + constexpr std::array wildCards{'[', ']', '*', '?'}; // Wildcards accepted by TChain::Add + return std::any_of(wildCards.begin(), wildCards.end(), + [&baseName](auto &&wc) { return baseName.find(wc) != std::string_view::npos; }); }(); // Open first file in case of glob, suppose all files in the glob use the same data format - std::string fileToOpen{fileIsGlob ? ROOT::Internal::TreeUtils::ExpandGlob(std::string{fileNameGlob})[0] - : fileNameGlob}; + std::string fileToOpen{nameHasWildcard + ? ROOT::Internal::TreeUtils::ExpandGlob(std::string{baseName})[0] + std::string{query} + : fileNameGlob}; ::TDirectory::TContext ctxt; // Avoid changing gDirectory; std::unique_ptr inFile{TFile::Open(fileToOpen.c_str(), "READ_WITHOUT_GLOBALREGISTRATION")}; @@ -1207,6 +1289,8 @@ std::shared_ptr ROOT::Detail::RDF::CreateLMFromTTree(std::string_view datasetName, const std::vector &fileNameGlobs, const std::vector &defaultColumns, bool checkFile) { + if (fileNameGlobs.size() == 0) + throw std::invalid_argument("RDataFrame: empty list of input files."); // Introduce the same behaviour as in CreateLMFromFile for consistency. // Creating an RDataFrame with a non-existing file will throw early rather // than wait for the start of the graph execution. 
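To make the new file-name handling in OpenFileWithSanityChecks above easier to follow, here is a standalone sketch of the splitting rule described in its comment. The helper name, the main() driver and the sample paths are illustrative, not ROOT code; the point is only that glob expansion sees the part before the query and the query is re-appended afterwards.

#include <algorithm>
#include <cassert>
#include <string_view>
#include <utility>

// Split "name?query" following the rules above: prefer the last ".root",
// else the last '?', else treat the whole string as the file name.
std::pair<std::string_view, std::string_view> SplitBaseNameAndQuery(std::string_view fileNameGlob)
{
   constexpr std::string_view delim{".root"};
   if (auto it = std::find_end(fileNameGlob.begin(), fileNameGlob.end(), delim.begin(), delim.end());
       it != fileNameGlob.end()) {
      const auto cut = static_cast<std::size_t>(std::distance(fileNameGlob.begin(), it)) + delim.size();
      return {fileNameGlob.substr(0, cut), fileNameGlob.substr(cut)};
   }
   if (const auto lastQuestionMark = fileNameGlob.find_last_of('?'); lastQuestionMark != std::string_view::npos)
      return {fileNameGlob.substr(0, lastQuestionMark), fileNameGlob.substr(lastQuestionMark)};
   return {fileNameGlob, std::string_view{}};
}

int main()
{
   assert(SplitBaseNameAndQuery("data/f*.root?myq=xyz").first == "data/f*.root");
   assert(SplitBaseNameAndQuery("data/noext?myq=xyz").first == "data/noext");
   assert(SplitBaseNameAndQuery("data/file.root").second.empty());
   return 0;
}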
@@ -1262,6 +1346,9 @@ ROOT::Detail::RDF::CreateLMFromFile(std::string_view datasetName, const std::vec const ROOT::RDF::ColumnNames_t &defaultColumns) { + if (fileNameGlobs.size() == 0) + throw std::invalid_argument("RDataFrame: empty list of input files."); + auto inFile = OpenFileWithSanityChecks(fileNameGlobs[0]); if (inFile->Get(datasetName.data())) { diff --git a/tree/dataframe/test/dataframe_cloning.cxx b/tree/dataframe/test/dataframe_cloning.cxx index 966f6571dbc68..53d807bbfe644 100644 --- a/tree/dataframe/test/dataframe_cloning.cxx +++ b/tree/dataframe/test/dataframe_cloning.cxx @@ -9,13 +9,14 @@ #include // CloneResultAndAction #include #include -#include // ChangeEmptyEntryRange, ChangeSpec +#include // ChangeEmptyEntryRange, ChangeBeginAndEndEntries, ChangeSpec #include // CloneResultAndAction #include // ULong64_t #include // AccessPathName #include +using ROOT::Internal::RDF::ChangeBeginAndEndEntries; using ROOT::Internal::RDF::ChangeEmptyEntryRange; using ROOT::Internal::RDF::ChangeSpec; using ROOT::Internal::RDF::CloneResultAndAction; @@ -373,6 +374,34 @@ TEST(RDataFrameCloning, ChangeEmptyEntryRange) EXPECT_EQ(df.GetNRuns(), 3); } +TEST(RDataFrameCloning, ChangeBeginAndEndEntries) +{ + auto treeName{"events"}; + auto firstFile{"test_rdataframe_cloneactions_ChangeEmptyRange.root"}; + { + ROOT::RDataFrame df{10}; + df.Define("x", [](ULong64_t e) { return e; }, {"rdfentry_"}).Snapshot(treeName, firstFile); + } + + ROOT::RDataFrame df{treeName, firstFile}; + + ChangeBeginAndEndEntries(df, 0, 3); + auto take1 = df.Take("x"); + EXPECT_VEC_EQ(*take1, {0, 1, 2}); + + ChangeBeginAndEndEntries(df, 3, 7); + auto take2 = CloneResultAndAction(take1); + EXPECT_VEC_EQ(*take2, {3, 4, 5, 6}); + + ChangeBeginAndEndEntries(df, 7, 10); + auto take3 = CloneResultAndAction(take2); + EXPECT_VEC_EQ(*take3, {7, 8, 9}); + + EXPECT_EQ(df.GetNRuns(), 3); + + gSystem->Unlink(firstFile); +} + TEST(RDataFrameCloning, ChangeSpec) { std::string treeName{"events"}; diff --git a/tree/dataframe/test/dataframe_regression.cxx b/tree/dataframe/test/dataframe_regression.cxx index a9cb14c574044..323e21895f931 100644 --- a/tree/dataframe/test/dataframe_regression.cxx +++ b/tree/dataframe/test/dataframe_regression.cxx @@ -274,6 +274,63 @@ TEST_P(RDFRegressionTests, UseAfterDeleteOfSampleCallbacks) df.Count().GetValue(); } +// #16475 +struct DatasetGuard { + DatasetGuard(std::string_view treeName, std::string_view fileName) : fTreeName(treeName), fFileName(fileName) + { + TFile f{fFileName.c_str(), "recreate"}; + TTree t{fTreeName.c_str(), fTreeName.c_str()}; + int x{}; + t.Branch("x", &x, "x/I"); + for (auto i = 0; i < 10; i++) { + x = i; + t.Fill(); + } + f.Write(); + } + DatasetGuard(const DatasetGuard &) = delete; + DatasetGuard &operator=(const DatasetGuard &) = delete; + DatasetGuard(DatasetGuard &&) = delete; + DatasetGuard &operator=(DatasetGuard &&) = delete; + ~DatasetGuard() { std::remove(fFileName.c_str()); } + std::string fTreeName; + std::string fFileName; +}; + +TEST_P(RDFRegressionTests, FileNameQuery) +{ + DatasetGuard dataset{"events", "dataframe_regression_filenamequery.root"}; + constexpr auto fileNameWithQuery{"dataframe_regression_filenamequery.root?myq=xyz"}; + ROOT::RDataFrame df{dataset.fTreeName, fileNameWithQuery}; + EXPECT_EQ(df.Count().GetValue(), 10); +} + +TEST_P(RDFRegressionTests, FileNameWildcardQuery) +{ + DatasetGuard dataset{"events", "dataframe_regression_filenamewildcardquery.root"}; + constexpr auto fileNameWithQuery{"dataframe_regress?on_filenamewildcardquery.root?myq=xyz"}; 
+ ROOT::RDataFrame df{dataset.fTreeName, fileNameWithQuery}; + EXPECT_EQ(df.Count().GetValue(), 10); +} + +TEST_P(RDFRegressionTests, FileNameQueryNoExt) +{ + DatasetGuard dataset{"events", "dataframe_regression_filenamequerynoext"}; + constexpr auto fileNameWithQuery{"dataframe_regression_filenamequerynoext?myq=xyz"}; + ROOT::RDataFrame df{dataset.fTreeName, fileNameWithQuery}; + EXPECT_EQ(df.Count().GetValue(), 10); +} + +TEST_P(RDFRegressionTests, EmptyFileList) +{ + try { + ROOT::RDataFrame df{"", {}}; + } catch (const std::invalid_argument &e) { + const std::string expected{"RDataFrame: empty list of input files."}; + EXPECT_EQ(e.what(), expected); + } +} + // run single-thread tests INSTANTIATE_TEST_SUITE_P(Seq, RDFRegressionTests, ::testing::Values(false)); diff --git a/tree/ntuple/v7/doc/BinaryFormatSpecification.md b/tree/ntuple/v7/doc/BinaryFormatSpecification.md index d0603ff833ec0..28165d0e36b36 100644 --- a/tree/ntuple/v7/doc/BinaryFormatSpecification.md +++ b/tree/ntuple/v7/doc/BinaryFormatSpecification.md @@ -1,6 +1,4 @@ -# RNTuple Binary Format Specification 0.3.0.0 - -**Note:** This is work in progress. The RNTuple specification is not yet finalized. +# RNTuple Binary Format Specification 1.0.0.0 ## Versioning Notes @@ -9,7 +7,7 @@ It uses the following scheme: EPOCH.MAJOR.MINOR.PATCH _Epoch_: an increment of the epoch indicates backward-incompatible changes. The RNTuple pre-release has epoch 0. -The first public release will get epoch 1. +The first public release has epoch 1. There is currently no further epoch foreseen. _Major_: an increment of the major version indicates forward-incompatible changes. @@ -1031,6 +1029,28 @@ The limits refer to a single RNTuple and do not consider combinations/joins such | Maximum string length (meta-data) | 4GB | String encoding | | Maximum RBlob size | 128 PiB | 1GiB / 8B * 1GiB (with maxKeySize=1GiB, offsetSize=8B) | +## Naming specification + +The name of an RNTuple as well as the name of a field cannot be represented with an empty string when persistified (e.g. +when written to disk). Furthermore, the allowed character set is restricted to Unicode characters encoded as UTF-8, +with the following exceptions: + +* All control codes. These notably include newline (U+000A) and horizontal tab (U+0009). +* Full stop (U+002E '.') +* Space (U+0020 ' ') +* Backslash (U+005C '\') +* Slash (U+002F '/') + +## Defaults + +This section summarizes default settings of `RNTupleWriteOptions`. + +| Default | Value | +|----------------------------------------|------------------------------| +| Approximate Zipped Cluster | 100 MB | +| Max Unzipped Cluster | 1 GiB | +| Max Unzipped Page | 1 MiB | + ## Glossary ### Anchor diff --git a/tree/ntuple/v7/inc/ROOT/RClusterPool.hxx b/tree/ntuple/v7/inc/ROOT/RClusterPool.hxx index e9c2e0d3ee22b..31a8599cdbfe5 100644 --- a/tree/ntuple/v7/inc/ROOT/RClusterPool.hxx +++ b/tree/ntuple/v7/inc/ROOT/RClusterPool.hxx @@ -67,9 +67,6 @@ private: struct RInFlightCluster { std::future> fFuture; RCluster::RKey fClusterKey; - /// By the time a cluster has been loaded, this cluster might not be necessary anymore. This can happen if - /// there are jumps in the access pattern (i.e. the access pattern deviates from linear access). 
- bool fIsExpired = false; bool operator ==(const RInFlightCluster &other) const { return (fClusterKey.fClusterId == other.fClusterKey.fClusterId) && diff --git a/tree/ntuple/v7/inc/ROOT/RColumnElementBase.hxx b/tree/ntuple/v7/inc/ROOT/RColumnElementBase.hxx index 818e1584d24f2..1b62f14b31ec8 100644 --- a/tree/ntuple/v7/inc/ROOT/RColumnElementBase.hxx +++ b/tree/ntuple/v7/inc/ROOT/RColumnElementBase.hxx @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -64,6 +65,20 @@ protected: } public: + /// Every concrete RColumnElement type is identified by its on-disk type (column type) and the + /// in-memory C++ type, given by a type index. + struct RIdentifier { + std::type_index fInMemoryType = std::type_index(typeid(void)); + EColumnType fOnDiskType = EColumnType::kUnknown; + + bool operator==(const RIdentifier &other) const + { + return this->fInMemoryType == other.fInMemoryType && this->fOnDiskType == other.fOnDiskType; + } + + bool operator!=(const RIdentifier &other) const { return !(*this == other); } + }; + RColumnElementBase(const RColumnElementBase &other) = default; RColumnElementBase(RColumnElementBase &&other) = default; RColumnElementBase &operator=(const RColumnElementBase &other) = delete; @@ -73,7 +88,7 @@ public: /// If CppT == void, use the default C++ type for the given column type template static std::unique_ptr Generate(EColumnType type); - static const char *GetTypeName(EColumnType type); + static const char *GetColumnTypeName(EColumnType type); /// Most types have a fixed on-disk bit width. Some low-precision column types /// have a range of possible bit widths. Return the minimum and maximum allowed /// bit size per type. @@ -113,78 +128,26 @@ public: std::size_t GetBitsOnStorage() const { return fBitsOnStorage; } std::optional> GetValueRange() const { return fValueRange; } std::size_t GetPackedSize(std::size_t nElements = 1U) const { return (nElements * fBitsOnStorage + 7) / 8; } -}; // class RColumnElementBase -// All supported C++ in-memory types -enum class EColumnCppType { - kChar, - kBool, - kByte, - kUint8, - kUint16, - kUint32, - kUint64, - kInt8, - kInt16, - kInt32, - kInt64, - kFloat, - kDouble, - kClusterSize, - kColumnSwitch, - kMax -}; - -inline constexpr EColumnCppType kTestFutureColumn = - static_cast(std::numeric_limits>::max() - 1); + virtual RIdentifier GetIdentifier() const = 0; +}; // class RColumnElementBase struct RTestFutureColumn { std::uint32_t dummy; }; -std::unique_ptr GenerateColumnElement(EColumnCppType cppType, EColumnType colType); +std::unique_ptr GenerateColumnElement(std::type_index inMemoryType, EColumnType onDiskType); + +std::unique_ptr GenerateColumnElement(const RColumnElementBase::RIdentifier &elementId); template -std::unique_ptr RColumnElementBase::Generate(EColumnType type) +std::unique_ptr RColumnElementBase::Generate(EColumnType onDiskType) { - if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kChar, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kBool, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kByte, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kUint8, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kUint16, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kUint32, type); - else if constexpr (std::is_same_v) - return 
GenerateColumnElement(EColumnCppType::kUint64, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kInt8, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kInt16, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kInt32, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kInt64, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kFloat, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kDouble, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kClusterSize, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(EColumnCppType::kColumnSwitch, type); - else if constexpr (std::is_same_v) - return GenerateColumnElement(kTestFutureColumn, type); - else - static_assert(!sizeof(CppT), "Unsupported Cpp type"); + return GenerateColumnElement(std::type_index(typeid(CppT)), onDiskType); } template <> -std::unique_ptr RColumnElementBase::Generate(EColumnType type); +std::unique_ptr RColumnElementBase::Generate(EColumnType onDiskType); } // namespace ROOT::Experimental::Internal diff --git a/tree/ntuple/v7/inc/ROOT/RFieldBase.hxx b/tree/ntuple/v7/inc/ROOT/RFieldBase.hxx index 857c1db33e77a..62ad902554aac 100644 --- a/tree/ntuple/v7/inc/ROOT/RFieldBase.hxx +++ b/tree/ntuple/v7/inc/ROOT/RFieldBase.hxx @@ -475,8 +475,6 @@ public: /// Checks if the given type is supported by RNTuple. In case of success, the result vector is empty. /// Otherwise there is an error record for each failing sub field (sub type). static std::vector Check(const std::string &fieldName, const std::string &typeName); - /// Check whether a given string is a valid field name - static RResult EnsureValidFieldName(std::string_view fieldName); /// Generates an object of the field type and allocates new initialized memory according to the type. /// Implemented at the end of this header because the implementation is using RField::TypeName() diff --git a/tree/ntuple/v7/inc/ROOT/RMiniFile.hxx b/tree/ntuple/v7/inc/ROOT/RMiniFile.hxx index 412294ece0a38..e72142b843b05 100644 --- a/tree/ntuple/v7/inc/ROOT/RMiniFile.hxx +++ b/tree/ntuple/v7/inc/ROOT/RMiniFile.hxx @@ -27,8 +27,7 @@ #include #include -class TCollection; -class TFile; +class TDirectory; class TFileMergeInfo; class TVirtualStreamerInfo; @@ -69,8 +68,15 @@ private: /// Used when the file container turns out to be a bare file RResult GetNTupleBare(std::string_view ntupleName); - /// Used when the file turns out to be a TFile container - RResult GetNTupleProper(std::string_view ntupleName); + /// Used when the file turns out to be a TFile container. The ntuplePath variable is either the ntuple name + /// or an ntuple name preceded by a directory (`myNtuple` or `foo/bar/myNtuple` or `/foo/bar/myNtuple`) + RResult GetNTupleProper(std::string_view ntuplePath); + + /// Searches for a key with the given name and type in the key index of the directory starting at offsetDir. + /// The offset points to the start of the TDirectory DATA section, without the key and without the name and title + /// of the TFile record (the root directory). + /// Return 0 if the key was not found. Otherwise returns the offset of found key. 
+ std::uint64_t SearchInDirectory(std::uint64_t &offsetDir, std::string_view keyName, std::string_view typeName); public: RMiniFileReader() = default; @@ -103,12 +109,13 @@ A stand-alone version of RNTuple can remove the TFile based writer. class RNTupleFileWriter { private: struct RFileProper { - TFile *fFile = nullptr; + /// A sub directory in fFile or nullptr if the data is stored in the root directory of the file + TDirectory *fDirectory = nullptr; /// Low-level writing using a TFile void Write(const void *buffer, size_t nbytes, std::int64_t offset); /// Writes an RBlob opaque key with the provided buffer as data record and returns the offset of the record std::uint64_t WriteKey(const void *buffer, size_t nbytes, size_t len); - operator bool() const { return fFile; } + operator bool() const { return fDirectory; } }; struct RFileSimple { @@ -203,8 +210,9 @@ public: static std::unique_ptr Recreate(std::string_view ntupleName, std::string_view path, EContainerFormat containerFormat, const RNTupleWriteOptions &options); - /// Add a new RNTuple identified by ntupleName to the existing TFile. - static std::unique_ptr Append(std::string_view ntupleName, TFile &file, std::uint64_t maxKeySize); + /// The directory parameter can also be a TFile object (TFile inherits from TDirectory). + static std::unique_ptr + Append(std::string_view ntupleName, TDirectory &fileOrDirectory, std::uint64_t maxKeySize); RNTupleFileWriter(const RNTupleFileWriter &other) = delete; RNTupleFileWriter(RNTupleFileWriter &&other) = delete; diff --git a/tree/ntuple/v7/inc/ROOT/RNTuple.hxx b/tree/ntuple/v7/inc/ROOT/RNTuple.hxx index bb73537acc509..638c6cd56b92f 100644 --- a/tree/ntuple/v7/inc/ROOT/RNTuple.hxx +++ b/tree/ntuple/v7/inc/ROOT/RNTuple.hxx @@ -76,8 +76,8 @@ class RNTuple final { std::uint64_t nbytesFooter, std::uint64_t lenFooter, std::uint64_t maxKeySize); public: - static constexpr std::uint16_t kVersionEpoch = 0; - static constexpr std::uint16_t kVersionMajor = 3; + static constexpr std::uint16_t kVersionEpoch = 1; + static constexpr std::uint16_t kVersionMajor = 0; static constexpr std::uint16_t kVersionMinor = 0; static constexpr std::uint16_t kVersionPatch = 0; diff --git a/tree/ntuple/v7/inc/ROOT/RNTupleDescriptor.hxx b/tree/ntuple/v7/inc/ROOT/RNTupleDescriptor.hxx index a013b3c139ba4..eb2df2b45d05a 100644 --- a/tree/ntuple/v7/inc/ROOT/RNTupleDescriptor.hxx +++ b/tree/ntuple/v7/inc/ROOT/RNTupleDescriptor.hxx @@ -162,6 +162,7 @@ public: RValueRange(std::pair range) : fMin(range.first), fMax(range.second) {} bool operator==(RValueRange other) const { return fMin == other.fMin && fMax == other.fMax; } + bool operator!=(RValueRange other) const { return !(*this == other); } }; private: diff --git a/tree/ntuple/v7/inc/ROOT/RNTupleMerger.hxx b/tree/ntuple/v7/inc/ROOT/RNTupleMerger.hxx index 79d3133c60b08..8b340657e4d46 100644 --- a/tree/ntuple/v7/inc/ROOT/RNTupleMerger.hxx +++ b/tree/ntuple/v7/inc/ROOT/RNTupleMerger.hxx @@ -49,7 +49,7 @@ enum class ENTupleMergeErrBehavior { kSkip }; -struct RColumnInfo; +struct RColumnMergeInfo; struct RNTupleMergeData; struct RSealedPageMergeData; @@ -80,12 +80,12 @@ class RNTupleMerger final { std::unique_ptr fPageAlloc; std::optional fTaskGroup; - void MergeCommonColumns(RClusterPool &clusterPool, DescriptorId_t clusterId, std::span commonColumns, - RCluster::ColumnSet_t commonColumnSet, RSealedPageMergeData &sealedPageData, - const RNTupleMergeData &mergeData); + void MergeCommonColumns(RClusterPool &clusterPool, DescriptorId_t clusterId, + std::span commonColumns, const 
RCluster::ColumnSet_t &commonColumnSet, + RSealedPageMergeData &sealedPageData, const RNTupleMergeData &mergeData); - void MergeSourceClusters(RPageSource &source, std::span commonColumns, - std::span extraDstColumns, RNTupleMergeData &mergeData); + void MergeSourceClusters(RPageSource &source, std::span commonColumns, + std::span extraDstColumns, RNTupleMergeData &mergeData); public: RNTupleMerger(); diff --git a/tree/ntuple/v7/inc/ROOT/RNTupleModel.hxx b/tree/ntuple/v7/inc/ROOT/RNTupleModel.hxx index 187be224bc6cd..24ad8a11daf4d 100644 --- a/tree/ntuple/v7/inc/ROOT/RNTupleModel.hxx +++ b/tree/ntuple/v7/inc/ROOT/RNTupleModel.hxx @@ -214,8 +214,9 @@ private: /// Changed by Freeze() / Unfreeze() and by the RUpdater. bool fIsFrozen = false; - /// Checks that user-provided field names are valid in the context - /// of this NTuple model. Throws an RException for invalid names. + /// Checks that user-provided field names are valid in the context of this RNTuple model. + /// Throws an RException for invalid names, empty names (which is reserved for the zero field) and duplicate field + /// names. void EnsureValidFieldName(std::string_view fieldName); /// Throws an RException if fFrozen is true diff --git a/tree/ntuple/v7/inc/ROOT/RNTupleParallelWriter.hxx b/tree/ntuple/v7/inc/ROOT/RNTupleParallelWriter.hxx index af55307d234e2..712aa78442b18 100644 --- a/tree/ntuple/v7/inc/ROOT/RNTupleParallelWriter.hxx +++ b/tree/ntuple/v7/inc/ROOT/RNTupleParallelWriter.hxx @@ -24,7 +24,7 @@ #include #include -class TFile; +class TDirectory; namespace ROOT { namespace Experimental { @@ -86,7 +86,7 @@ public: const RNTupleWriteOptions &options = RNTupleWriteOptions()); /// Append an ntuple to the existing file, which must not be accessed while data is filled into any created context. 
static std::unique_ptr Append(std::unique_ptr model, - std::string_view ntupleName, TFile &file, + std::string_view ntupleName, TDirectory &fileOrDirectory, const RNTupleWriteOptions &options = RNTupleWriteOptions()); ~RNTupleParallelWriter(); diff --git a/tree/ntuple/v7/inc/ROOT/RNTupleUtil.hxx b/tree/ntuple/v7/inc/ROOT/RNTupleUtil.hxx index 03601283dc596..37fcc3db0572d 100644 --- a/tree/ntuple/v7/inc/ROOT/RNTupleUtil.hxx +++ b/tree/ntuple/v7/inc/ROOT/RNTupleUtil.hxx @@ -19,8 +19,10 @@ #include #include +#include #include +#include #include #include @@ -271,6 +273,9 @@ inline constexpr ENTupleStructure kTestFutureFieldStructure = inline constexpr RNTupleLocator::ELocatorType kTestLocatorType = static_cast(0x7e); static_assert(kTestLocatorType < RNTupleLocator::ELocatorType::kLastSerializableType); +/// Check whether a given string is a valid name according to the RNTuple specification +RResult EnsureValidNameForRNTuple(std::string_view name, std::string_view where); + } // namespace Internal } // namespace Experimental diff --git a/tree/ntuple/v7/inc/ROOT/RNTupleView.hxx b/tree/ntuple/v7/inc/ROOT/RNTupleView.hxx index 2b170aa4b6390..9e55ea526bf1e 100644 --- a/tree/ntuple/v7/inc/ROOT/RNTupleView.hxx +++ b/tree/ntuple/v7/inc/ROOT/RNTupleView.hxx @@ -16,6 +16,7 @@ #ifndef ROOT7_RNTupleView #define ROOT7_RNTupleView +#include #include #include #include @@ -67,9 +68,10 @@ public: }; RNTupleGlobalRange(NTupleSize_t start, NTupleSize_t end) : fStart(start), fEnd(end) {} - RIterator begin() { return RIterator(fStart); } - RIterator end() { return RIterator(fEnd); } - NTupleSize_t size() { return fEnd - fStart; } + RIterator begin() const { return RIterator(fStart); } + RIterator end() const { return RIterator(fEnd); } + NTupleSize_t size() const { return fEnd - fStart; } + bool IsValid() const { return (fStart != kInvalidNTupleIndex) && (fEnd != kInvalidNTupleIndex); } }; @@ -111,8 +113,9 @@ public: RNTupleClusterRange(DescriptorId_t clusterId, ClusterSize_t::ValueType start, ClusterSize_t::ValueType end) : fClusterId(clusterId), fStart(start), fEnd(end) {} - RIterator begin() { return RIterator(RClusterIndex(fClusterId, fStart)); } - RIterator end() { return RIterator(RClusterIndex(fClusterId, fEnd)); } + RIterator begin() const { return RIterator(RClusterIndex(fClusterId, fStart)); } + RIterator end() const { return RIterator(RClusterIndex(fClusterId, fEnd)); } + NTupleSize_t size() const { return fEnd - fStart; } }; namespace Internal { @@ -120,7 +123,8 @@ namespace Internal { /// Helper to get the iteration space of the given field that needs to be connected to the given page source. /// The indexes are given by the number of elements of the principal column of the field or, if none exists, /// by the number of elements of the first principal column found in the subfields searched by BFS. -/// If the field hierarchy is empty on columns, throw an exception. +/// If the field hierarchy is empty on columns, the returned field range is invalid (start and end set to +/// kInvalidNTupleIndex). An attempt to use such a field range in RNTupleViewBase::GetFieldRange will throw. 
RNTupleGlobalRange GetFieldRange(const RFieldBase &field, const RPageSource &pageSource); } // namespace Internal @@ -187,8 +191,16 @@ public: ~RNTupleViewBase() = default; const RFieldBase &GetField() const { return *fField; } + RFieldBase::RBulk CreateBulk() { return fField->CreateBulk(); } + const RFieldBase::RValue &GetValue() const { return fValue; } - RNTupleGlobalRange GetFieldRange() const { return fFieldRange; } + RNTupleGlobalRange GetFieldRange() const + { + if (!fFieldRange.IsValid()) { + throw RException(R__FAIL("field iteration over empty fields is unsupported: " + fField->GetFieldName())); + } + return fFieldRange; + } void Bind(std::shared_ptr objPtr) { fValue.Bind(objPtr); } void BindRawPtr(T *rawPtr) { fValue.BindRawPtr(rawPtr); } diff --git a/tree/ntuple/v7/inc/ROOT/RNTupleWriter.hxx b/tree/ntuple/v7/inc/ROOT/RNTupleWriter.hxx index 7dbb61b855622..f4b211b3a1155 100644 --- a/tree/ntuple/v7/inc/ROOT/RNTupleWriter.hxx +++ b/tree/ntuple/v7/inc/ROOT/RNTupleWriter.hxx @@ -32,7 +32,7 @@ #include #include -class TFile; +class TDirectory; namespace ROOT { namespace Experimental { @@ -94,7 +94,7 @@ public: std::string_view storage, const RNTupleWriteOptions &options = RNTupleWriteOptions()); /// Throws an exception if the model is null. static std::unique_ptr Append(std::unique_ptr model, std::string_view ntupleName, - TFile &file, + TDirectory &fileOrDirectory, const RNTupleWriteOptions &options = RNTupleWriteOptions()); RNTupleWriter(const RNTupleWriter &) = delete; RNTupleWriter &operator=(const RNTupleWriter &) = delete; diff --git a/tree/ntuple/v7/inc/ROOT/RPagePool.hxx b/tree/ntuple/v7/inc/ROOT/RPagePool.hxx index 31371f953fb2b..1f5025ce16914 100644 --- a/tree/ntuple/v7/inc/ROOT/RPagePool.hxx +++ b/tree/ntuple/v7/inc/ROOT/RPagePool.hxx @@ -22,6 +22,8 @@ #include #include +#include +#include #include namespace ROOT { @@ -43,11 +45,17 @@ Page sources also use the page pool to stage (preload) pages unsealed by IMT tas class RPagePool { friend class RPageRef; + // TODO(jblomer): move column ID from RPage to this struct + struct RPageInfo { + std::type_index fInMemoryType = std::type_index(typeid(void)); + }; + /// TODO(jblomer): should be an efficient index structure that allows /// - random insert /// - random delete /// - searching by page std::vector fPages; + std::vector fPageInfos; std::vector fReferences; std::mutex fLock; @@ -64,13 +72,13 @@ public: /// Adds a new page to the pool. Upon registration, the page pool takes ownership of the page's memory. /// The new page has its reference counter set to 1. - RPageRef RegisterPage(RPage page); + RPageRef RegisterPage(RPage page, std::type_index inMemoryType); /// Like RegisterPage() but the reference counter is initialized to 0 - void PreloadPage(RPage page); + void PreloadPage(RPage page, std::type_index inMemoryType); /// Tries to find the page corresponding to column and index in the cache. 
If the page is found, its reference /// counter is increased - RPageRef GetPage(ColumnId_t columnId, NTupleSize_t globalIndex); - RPageRef GetPage(ColumnId_t columnId, RClusterIndex clusterIndex); + RPageRef GetPage(ColumnId_t columnId, std::type_index inMemoryType, NTupleSize_t globalIndex); + RPageRef GetPage(ColumnId_t columnId, std::type_index inMemoryType, RClusterIndex clusterIndex); }; // clang-format off diff --git a/tree/ntuple/v7/inc/ROOT/RPageStorage.hxx b/tree/ntuple/v7/inc/ROOT/RPageStorage.hxx index 2ed32205e11ed..67167f9d1b07c 100644 --- a/tree/ntuple/v7/inc/ROOT/RPageStorage.hxx +++ b/tree/ntuple/v7/inc/ROOT/RPageStorage.hxx @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -38,6 +39,7 @@ #include #include #include +#include #include #include @@ -48,7 +50,6 @@ class RNTupleModel; namespace Internal { class RColumn; -class RColumnElementBase; class RNTupleCompressor; struct RNTupleModelChangeset; class RPageAllocator; @@ -628,17 +629,32 @@ protected: Detail::RNTupleCalcPerf &fCompressionRatio; }; - /// Keeps track of the requested physical column IDs. When using alias columns (projected fields), physical - /// columns may be requested multiple times. + /// Keeps track of the requested physical column IDs and their in-memory target type via a column element identifier. + /// When using alias columns (projected fields), physical columns may be requested multiple times. class RActivePhysicalColumns { + public: + struct RColumnInfo { + RColumnElementBase::RIdentifier fElementId; + std::size_t fRefCounter = 0; + }; + private: - std::vector fIDs; - std::vector fRefCounters; + /// Maps physical column IDs to all the requested in-memory representations. + /// A pair of physical column ID and in-memory representation can be requested multiple times, which is + /// indicated by the reference counter. + /// We can only have a handful of possible in-memory representations for a given column, + /// so it is fine to search them linearly. + std::unordered_map> fColumnInfos; public: - void Insert(DescriptorId_t physicalColumnID); - void Erase(DescriptorId_t physicalColumnID); + void Insert(DescriptorId_t physicalColumnId, RColumnElementBase::RIdentifier elementId); + void Erase(DescriptorId_t physicalColumnId, RColumnElementBase::RIdentifier elementId); RCluster::ColumnSet_t ToColumnSet() const; + bool HasColumnInfos(DescriptorId_t physicalColumnId) const { return fColumnInfos.count(physicalColumnId) > 0; } + const std::vector &GetColumnInfos(DescriptorId_t physicalColumnId) const + { + return fColumnInfos.at(physicalColumnId); + } }; /// Summarizes cluster-level information that are necessary to load a certain page. 
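The changes above key cached pages and active columns by both the physical column ID and the requested in-memory type (via RColumnElementBase::RIdentifier and std::type_index), so one on-disk column can be materialized in more than one C++ representation. A small, ROOT-independent sketch of that lookup idea follows; PageKey, PageKeyHash and the toy map are illustrative and do not reflect the actual RPagePool data layout.

#include <cstdint>
#include <functional>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>

// A cache key combining a column identifier with the requested in-memory type.
struct PageKey {
   std::uint64_t fColumnId;
   std::type_index fInMemoryType;
   bool operator==(const PageKey &other) const
   {
      return fColumnId == other.fColumnId && fInMemoryType == other.fInMemoryType;
   }
};

struct PageKeyHash {
   std::size_t operator()(const PageKey &key) const
   {
      // Simple hash combination, sufficient for the illustration.
      return std::hash<std::uint64_t>{}(key.fColumnId) ^ (key.fInMemoryType.hash_code() << 1);
   }
};

int main()
{
   std::unordered_map<PageKey, const char *, PageKeyHash> pool;
   const PageKey asFloat{7, std::type_index(typeid(float))};
   const PageKey asDouble{7, std::type_index(typeid(double))};
   // The same on-disk column (ID 7) is cached twice, once per in-memory type.
   pool[asFloat] = "float page";
   pool[asDouble] = "double page";
   return pool.size() == 2 ? 0 : 1;
}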
diff --git a/tree/ntuple/v7/inc/ROOT/RPageStorageFile.hxx b/tree/ntuple/v7/inc/ROOT/RPageStorageFile.hxx index e3b9820017824..8bf3fe69d819a 100644 --- a/tree/ntuple/v7/inc/ROOT/RPageStorageFile.hxx +++ b/tree/ntuple/v7/inc/ROOT/RPageStorageFile.hxx @@ -31,7 +31,7 @@ #include #include -class TFile; +class TDirectory; namespace ROOT { class RNTuple; // for making RPageSourceFile a friend of RNTuple @@ -99,7 +99,7 @@ protected: public: RPageSinkFile(std::string_view ntupleName, std::string_view path, const RNTupleWriteOptions &options); - RPageSinkFile(std::string_view ntupleName, TFile &file, const RNTupleWriteOptions &options); + RPageSinkFile(std::string_view ntupleName, TDirectory &fileOrDirectory, const RNTupleWriteOptions &options); RPageSinkFile(const RPageSinkFile &) = delete; RPageSinkFile &operator=(const RPageSinkFile &) = delete; RPageSinkFile(RPageSinkFile &&) = default; diff --git a/tree/ntuple/v7/src/RClusterPool.cxx b/tree/ntuple/v7/src/RClusterPool.cxx index 4045c8f8546c1..6dc2a596110da 100644 --- a/tree/ntuple/v7/src/RClusterPool.cxx +++ b/tree/ntuple/v7/src/RClusterPool.cxx @@ -97,23 +97,7 @@ void ROOT::Experimental::Internal::RClusterPool::ExecReadClusters() auto clusters = fPageSource.LoadClusters(clusterKeys); for (std::size_t i = 0; i < clusters.size(); ++i) { - // Meanwhile, the user might have requested clusters outside the look-ahead window, so that we don't - // need the cluster anymore, in which case we simply discard it right away, before moving it to the pool - bool discard; - { - std::unique_lock lock(fLockWorkQueue); - discard = std::any_of(fInFlightClusters.begin(), fInFlightClusters.end(), - [thisClusterId = clusters[i]->GetId()](auto &inFlight) { - return inFlight.fClusterKey.fClusterId == thisClusterId && inFlight.fIsExpired; - }); - } - if (discard) { - clusters[i].reset(); - // clusters[i] is now nullptr; also return this via the promise. 
- readItems[i].fPromise.set_value(nullptr); - } else { - readItems[i].fPromise.set_value(std::move(clusters[i])); - } + readItems[i].fPromise.set_value(std::move(clusters[i])); } readItems.erase(readItems.begin(), readItems.begin() + clusters.size()); } @@ -263,9 +247,6 @@ ROOT::Experimental::Internal::RClusterPool::GetCluster(DescriptorId_t clusterId, for (auto itr = fInFlightClusters.begin(); itr != fInFlightClusters.end(); ) { R__ASSERT(itr->fFuture.valid()); - itr->fIsExpired = - !provide.Contains(itr->fClusterKey.fClusterId) && (keep.count(itr->fClusterKey.fClusterId) == 0); - if (itr->fFuture.wait_for(std::chrono::seconds(0)) != std::future_status::ready) { // Remove the set of columns that are already scheduled for being loaded provide.Erase(itr->fClusterKey.fClusterId, itr->fClusterKey.fPhysicalColumnSet); @@ -274,8 +255,11 @@ ROOT::Experimental::Internal::RClusterPool::GetCluster(DescriptorId_t clusterId, } auto cptr = itr->fFuture.get(); - // If cptr is nullptr, the cluster expired previously and was released by the I/O thread - if (!cptr || itr->fIsExpired) { + R__ASSERT(cptr); + + const bool isExpired = + !provide.Contains(itr->fClusterKey.fClusterId) && (keep.count(itr->fClusterKey.fClusterId) == 0); + if (isExpired) { cptr.reset(); itr = fInFlightClusters.erase(itr); continue; diff --git a/tree/ntuple/v7/src/RColumnElement.cxx b/tree/ntuple/v7/src/RColumnElement.cxx index 1fd196f20fe6f..69b474c7fe66a 100644 --- a/tree/ntuple/v7/src/RColumnElement.cxx +++ b/tree/ntuple/v7/src/RColumnElement.cxx @@ -67,7 +67,7 @@ ROOT::Experimental::Internal::RColumnElementBase::GetValidBitRange(EColumnType t return std::make_pair(0, 0); } -const char *ROOT::Experimental::Internal::RColumnElementBase::GetTypeName(EColumnType type) +const char *ROOT::Experimental::Internal::RColumnElementBase::GetColumnTypeName(EColumnType type) { switch (type) { case EColumnType::kIndex64: return "Index64"; @@ -108,9 +108,9 @@ const char *ROOT::Experimental::Internal::RColumnElementBase::GetTypeName(EColum template <> std::unique_ptr -ROOT::Experimental::Internal::RColumnElementBase::Generate(EColumnType type) +ROOT::Experimental::Internal::RColumnElementBase::Generate(EColumnType onDiskType) { - switch (type) { + switch (onDiskType) { case EColumnType::kIndex64: return std::make_unique>(); case EColumnType::kIndex32: return std::make_unique>(); case EColumnType::kSwitch: return std::make_unique>(); @@ -144,7 +144,7 @@ ROOT::Experimental::Internal::RColumnElementBase::Generate(EColumnType typ case EColumnType::kReal32Trunc: return std::make_unique>(); case EColumnType::kReal32Quant: return std::make_unique>(); default: - if (type == kTestFutureType) + if (onDiskType == kTestFutureType) return std::make_unique>(); assert(false); } @@ -153,33 +153,53 @@ ROOT::Experimental::Internal::RColumnElementBase::Generate(EColumnType typ } std::unique_ptr -ROOT::Experimental::Internal::GenerateColumnElement(EColumnCppType cppType, EColumnType type) +ROOT::Experimental::Internal::GenerateColumnElement(std::type_index inMemoryType, EColumnType onDiskType) { - switch (cppType) { - case EColumnCppType::kChar: return GenerateColumnElementInternal(type); - case EColumnCppType::kBool: return GenerateColumnElementInternal(type); - case EColumnCppType::kByte: return GenerateColumnElementInternal(type); - case EColumnCppType::kUint8: return GenerateColumnElementInternal(type); - case EColumnCppType::kUint16: return GenerateColumnElementInternal(type); - case EColumnCppType::kUint32: return GenerateColumnElementInternal(type); - case 
EColumnCppType::kUint64: return GenerateColumnElementInternal(type); - case EColumnCppType::kInt8: return GenerateColumnElementInternal(type); - case EColumnCppType::kInt16: return GenerateColumnElementInternal(type); - case EColumnCppType::kInt32: return GenerateColumnElementInternal(type); - case EColumnCppType::kInt64: return GenerateColumnElementInternal(type); - case EColumnCppType::kFloat: return GenerateColumnElementInternal(type); - case EColumnCppType::kDouble: return GenerateColumnElementInternal(type); - case EColumnCppType::kClusterSize: return GenerateColumnElementInternal(type); - case EColumnCppType::kColumnSwitch: return GenerateColumnElementInternal(type); - default: - if (cppType == kTestFutureColumn) - return GenerateColumnElementInternal(type); - R__ASSERT(!"Invalid column cpp type"); + if (inMemoryType == std::type_index(typeid(char))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(bool))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(std::byte))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(std::uint8_t))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(std::uint16_t))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(std::uint32_t))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(std::uint64_t))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(std::int8_t))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(std::int16_t))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(std::int32_t))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(std::int64_t))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(float))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(double))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(ClusterSize_t))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(RColumnSwitch))) { + return GenerateColumnElementInternal(onDiskType); + } else if (inMemoryType == std::type_index(typeid(RTestFutureColumn))) { + return GenerateColumnElementInternal(onDiskType); + } else { + R__ASSERT(!"Invalid memory type in GenerateColumnElement"); } // never here return nullptr; } +std::unique_ptr +ROOT::Experimental::Internal::GenerateColumnElement(const RColumnElementBase::RIdentifier &elementId) +{ + return GenerateColumnElement(elementId.fInMemoryType, elementId.fOnDiskType); +} + void ROOT::Experimental::Internal::BitPacking::PackBits(void *dst, const void *src, std::size_t count, std::size_t sizeofSrc, std::size_t nDstBits) { diff --git a/tree/ntuple/v7/src/RColumnElement.hxx b/tree/ntuple/v7/src/RColumnElement.hxx index 31a768604a36b..228be6dcffd62 100644 --- a/tree/ntuple/v7/src/RColumnElement.hxx +++ b/tree/ntuple/v7/src/RColumnElement.hxx @@ -327,9 +327,9 @@ template class RColumnElement; template -std::unique_ptr GenerateColumnElementInternal(EColumnType type) 
+std::unique_ptr GenerateColumnElementInternal(EColumnType onDiskType) { - switch (type) { + switch (onDiskType) { case EColumnType::kIndex64: return std::make_unique>(); case EColumnType::kIndex32: return std::make_unique>(); case EColumnType::kSwitch: return std::make_unique>(); @@ -360,7 +360,7 @@ std::unique_ptr GenerateColumnElementInternal(EColumnType ty case EColumnType::kReal32Trunc: return std::make_unique>(); case EColumnType::kReal32Quant: return std::make_unique>(); default: - if (type == kTestFutureType) + if (onDiskType == kTestFutureType) return std::make_unique>(); R__ASSERT(false); } @@ -597,8 +597,10 @@ public: { throw ROOT::Experimental::RException( R__FAIL(std::string("internal error: no column mapping for this C++ type: ") + typeid(CppT).name() + " --> " + - GetTypeName(ColumnT))); + GetColumnTypeName(ColumnT))); } + + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(CppT), EColumnType::kUnknown}; } }; template <> @@ -606,6 +608,7 @@ class RColumnElement : public RColumnElementBase { public: static constexpr std::size_t kSize = sizeof(bool); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(bool), EColumnType::kUnknown}; } }; template <> @@ -613,6 +616,7 @@ class RColumnElement : public RColumnElementBa public: static constexpr std::size_t kSize = sizeof(std::byte); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(std::byte), EColumnType::kUnknown}; } }; template <> @@ -620,6 +624,7 @@ class RColumnElement : public RColumnElementBase { public: static constexpr std::size_t kSize = sizeof(char); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(char), EColumnType::kUnknown}; } }; template <> @@ -627,6 +632,7 @@ class RColumnElement : public RColumnElement public: static constexpr std::size_t kSize = sizeof(std::int8_t); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(std::int8_t), EColumnType::kUnknown}; } }; template <> @@ -634,6 +640,7 @@ class RColumnElement : public RColumnElemen public: static constexpr std::size_t kSize = sizeof(std::uint8_t); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(std::uint8_t), EColumnType::kUnknown}; } }; template <> @@ -641,6 +648,7 @@ class RColumnElement : public RColumnElemen public: static constexpr std::size_t kSize = sizeof(std::int16_t); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(std::int16_t), EColumnType::kUnknown}; } }; template <> @@ -648,6 +656,7 @@ class RColumnElement : public RColumnEleme public: static constexpr std::size_t kSize = sizeof(std::uint16_t); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(std::uint16_t), EColumnType::kUnknown}; } }; template <> @@ -655,6 +664,7 @@ class RColumnElement : public RColumnElemen public: static constexpr std::size_t kSize = sizeof(std::int32_t); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(std::int32_t), EColumnType::kUnknown}; } }; template <> @@ -662,6 +672,7 @@ class RColumnElement : public RColumnEleme public: static constexpr std::size_t kSize = sizeof(std::uint32_t); RColumnElement() : 
RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(std::uint32_t), EColumnType::kUnknown}; } }; template <> @@ -669,6 +680,7 @@ class RColumnElement : public RColumnElemen public: static constexpr std::size_t kSize = sizeof(std::int64_t); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(std::int64_t), EColumnType::kUnknown}; } }; template <> @@ -676,6 +688,7 @@ class RColumnElement : public RColumnEleme public: static constexpr std::size_t kSize = sizeof(std::uint64_t); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(std::uint64_t), EColumnType::kUnknown}; } }; template <> @@ -683,6 +696,7 @@ class RColumnElement : public RColumnElementBase { public: static constexpr std::size_t kSize = sizeof(float); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(float), EColumnType::kUnknown}; } }; template <> @@ -690,6 +704,7 @@ class RColumnElement : public RColumnElementBase public: static constexpr std::size_t kSize = sizeof(double); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(double), EColumnType::kUnknown}; } }; template <> @@ -697,6 +712,10 @@ class RColumnElement : public: static constexpr std::size_t kSize = sizeof(ROOT::Experimental::ClusterSize_t); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final + { + return RIdentifier{typeid(ROOT::Experimental::ClusterSize_t), EColumnType::kUnknown}; + } }; template <> @@ -704,6 +723,10 @@ class RColumnElement : public: static constexpr std::size_t kSize = sizeof(ROOT::Experimental::RColumnSwitch); RColumnElement() : RColumnElementBase(kSize) {} + RIdentifier GetIdentifier() const final + { + return RIdentifier{typeid(ROOT::Experimental::RColumnSwitch), EColumnType::kUnknown}; + } }; //////////////////////////////////////////////////////////////////////////////// @@ -755,6 +778,11 @@ public: ROOT::Experimental::RColumnSwitch(ROOT::Experimental::ClusterSize_t{element.fIndex}, element.fTag); } } + + RIdentifier GetIdentifier() const final + { + return RIdentifier{typeid(ROOT::Experimental::RColumnSwitch), EColumnType::kSwitch}; + } }; template <> @@ -768,6 +796,8 @@ public: void Pack(void *dst, const void *src, std::size_t count) const final; void Unpack(void *dst, const void *src, std::size_t count) const final; + + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(bool), EColumnType::kBit}; } }; template <> @@ -801,6 +831,8 @@ public: floatArray[i] = ROOT::Experimental::Internal::HalfToFloat(val); } } + + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(float), EColumnType::kReal16}; } }; template <> @@ -834,6 +866,8 @@ public: doubleArray[i] = static_cast(ROOT::Experimental::Internal::HalfToFloat(val)); } } + + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(double), EColumnType::kReal16}; } }; template @@ -856,6 +890,8 @@ public: } bool IsMappable() const final { return kIsMappable; } + + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(T), EColumnType::kReal32Trunc}; } }; template <> @@ -1106,6 +1142,8 @@ public: // this is not the case, as the user may give us float values that are out of range. 
assert(nOutOfRange == 0); } + + RIdentifier GetIdentifier() const final { return RIdentifier{typeid(T), EColumnType::kReal32Quant}; } }; template <> @@ -1114,13 +1152,17 @@ class RColumnElement : public RColumnElementQu template <> class RColumnElement : public RColumnElementQuantized {}; -#define __RCOLUMNELEMENT_SPEC_BODY(CppT, BaseT, BitsOnStorage) \ - static constexpr std::size_t kSize = sizeof(CppT); \ - static constexpr std::size_t kBitsOnStorage = BitsOnStorage; \ - RColumnElement() : BaseT(kSize, kBitsOnStorage) {} \ - bool IsMappable() const final \ - { \ - return kIsMappable; \ +#define __RCOLUMNELEMENT_SPEC_BODY(CppT, ColumnT, BaseT, BitsOnStorage) \ + static constexpr std::size_t kSize = sizeof(CppT); \ + static constexpr std::size_t kBitsOnStorage = BitsOnStorage; \ + RColumnElement() : BaseT(kSize, kBitsOnStorage) {} \ + bool IsMappable() const final \ + { \ + return kIsMappable; \ + } \ + RIdentifier GetIdentifier() const final \ + { \ + return RIdentifier{typeid(CppT), ColumnT}; \ } /// These macros are used to declare `RColumnElement` template specializations below. Additional arguments can be used /// to forward template parameters to the base class, e.g. @@ -1132,14 +1174,14 @@ class RColumnElement : public RColumnElementQ template <> \ class RColumnElement : public BaseT __VA_ARGS__ { \ public: \ - __RCOLUMNELEMENT_SPEC_BODY(CppT, BaseT, BitsOnStorage) \ + __RCOLUMNELEMENT_SPEC_BODY(CppT, ColumnT, BaseT, BitsOnStorage) \ } -#define DECLARE_RCOLUMNELEMENT_SPEC_SIMPLE(CppT, ColumnT, BitsOnStorage) \ - template <> \ - class RColumnElement : public RColumnElementBase { \ - public: \ - static constexpr bool kIsMappable = true; \ - __RCOLUMNELEMENT_SPEC_BODY(CppT, RColumnElementBase, BitsOnStorage) \ +#define DECLARE_RCOLUMNELEMENT_SPEC_SIMPLE(CppT, ColumnT, BitsOnStorage) \ + template <> \ + class RColumnElement : public RColumnElementBase { \ + public: \ + static constexpr bool kIsMappable = true; \ + __RCOLUMNELEMENT_SPEC_BODY(CppT, ColumnT, RColumnElementBase, BitsOnStorage) \ } DECLARE_RCOLUMNELEMENT_SPEC(bool, EColumnType::kChar, 8, RColumnElementBoolAsUnsplitInt, ); @@ -1419,6 +1461,11 @@ public: bool IsMappable() const { return kIsMappable; } void Pack(void *, const void *, std::size_t) const {} void Unpack(void *, const void *, std::size_t) const {} + + RIdentifier GetIdentifier() const final + { + return RIdentifier{typeid(ROOT::Experimental::Internal::RTestFutureColumn), kTestFutureType}; + } }; inline void diff --git a/tree/ntuple/v7/src/RField.cxx b/tree/ntuple/v7/src/RField.cxx index 526534c794b79..7aacf27298d05 100644 --- a/tree/ntuple/v7/src/RField.cxx +++ b/tree/ntuple/v7/src/RField.cxx @@ -565,6 +565,7 @@ ROOT::Experimental::RFieldBase::RFieldBase(std::string_view name, std::string_vi fPrincipalColumn(nullptr), fTraits(isSimple ? 
kTraitMappable : 0) { + ROOT::Experimental::Internal::EnsureValidNameForRNTuple(name, "Field"); } std::string ROOT::Experimental::RFieldBase::GetQualifiedFieldName() const @@ -894,16 +895,6 @@ ROOT::Experimental::RFieldBase::Create(const std::string &fieldName, const std:: return R__FORWARD_RESULT(fnFail("unknown type: " + canonicalType)); } -ROOT::Experimental::RResult ROOT::Experimental::RFieldBase::EnsureValidFieldName(std::string_view fieldName) -{ - if (fieldName.empty()) { - return R__FAIL("name cannot be empty string \"\""); - } else if (fieldName.find('.') != std::string::npos) { - return R__FAIL("name '" + std::string(fieldName) + "' cannot contain dot characters '.'"); - } - return RResult::Success(); -} - const ROOT::Experimental::RFieldBase::RColumnRepresentations & ROOT::Experimental::RFieldBase::GetColumnRepresentations() const { @@ -1186,7 +1177,7 @@ ROOT::Experimental::RFieldBase::EnsureCompatibleColumnTypes(const RNTupleDescrip for (const auto &t : onDiskTypes) { if (!columnTypeNames.empty()) columnTypeNames += ", "; - columnTypeNames += std::string("`") + Internal::RColumnElementBase::GetTypeName(t) + "`"; + columnTypeNames += std::string("`") + Internal::RColumnElementBase::GetColumnTypeName(t) + "`"; } throw RException(R__FAIL("On-disk column types {" + columnTypeNames + "} for field `" + GetQualifiedFieldName() + "` cannot be matched to its in-memory type `" + GetTypeName() + "` " + diff --git a/tree/ntuple/v7/src/RMiniFile.cxx b/tree/ntuple/v7/src/RMiniFile.cxx index f5de3a53bf854..f49dc377fa2cb 100644 --- a/tree/ntuple/v7/src/RMiniFile.cxx +++ b/tree/ntuple/v7/src/RMiniFile.cxx @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -469,8 +470,8 @@ struct RTFKeyList { explicit RTFKeyList(std::uint32_t nKeys) : fNKeys(nKeys) {} }; -/// A streamed TFile object -struct RTFFile { +/// A streamed TDirectory (TFile) object +struct RTFDirectory { RUInt16BE fClassVersion{5}; RTFDatetime fDateC; RTFDatetime fDateM; @@ -490,13 +491,13 @@ struct RTFFile { } fInfoLong; }; - RTFFile() : fInfoShort() {} + RTFDirectory() : fInfoShort() {} // In case of a short TFile record (<2G), 3 padding ints are written after the UUID std::uint32_t GetSize() const { if (fClassVersion >= 1000) - return sizeof(RTFFile); + return sizeof(RTFDirectory); return 18 + sizeof(fInfoShort); } @@ -601,7 +602,7 @@ namespace Internal { /// and the TFile record need to be updated struct RTFileControlBlock { RTFHeader fHeader; - RTFFile fFileRecord; + RTFDirectory fFileRecord; std::uint64_t fSeekNTuple{0}; // Remember the offset for the keys list std::uint64_t fSeekFileRecord{0}; }; @@ -675,56 +676,90 @@ ROOT::Experimental::Internal::RMiniFileReader::GetNTuple(std::string_view ntuple return GetNTupleBare(ntupleName); } -ROOT::Experimental::RResult -ROOT::Experimental::Internal::RMiniFileReader::GetNTupleProper(std::string_view ntupleName) +/// Searches for a key with the given name and type in the key index of the given directory. +/// Return 0 if the key was not found. +std::uint64_t ROOT::Experimental::Internal::RMiniFileReader::SearchInDirectory(std::uint64_t &offsetDir, + std::string_view keyName, + std::string_view typeName) { - RTFHeader fileHeader; - ReadBuffer(&fileHeader, sizeof(fileHeader), 0); + RTFDirectory directory; + ReadBuffer(&directory, sizeof(directory), offsetDir); RTFKey key; - RTFString name; - ReadBuffer(&key, sizeof(key), fileHeader.fBEGIN); - // Skip over the entire key length, including the class name, object name, and title stored in it. 
- std::uint64_t offset = fileHeader.fBEGIN + key.fKeyLen; - // Skip over the name and title of the TNamed preceding the TFile entry. - ReadBuffer(&name, 1, offset); - offset += name.GetSize(); - ReadBuffer(&name, 1, offset); - offset += name.GetSize(); - RTFFile file; - ReadBuffer(&file, sizeof(file), offset); - RUInt32BE nKeys; - offset = file.GetSeekKeys(); + std::uint64_t offset = directory.GetSeekKeys(); ReadBuffer(&key, sizeof(key), offset); offset += key.fKeyLen; ReadBuffer(&nKeys, sizeof(nKeys), offset); offset += sizeof(nKeys); - bool found = false; + for (unsigned int i = 0; i < nKeys; ++i) { ReadBuffer(&key, sizeof(key), offset); auto offsetNextKey = offset + key.fKeyLen; offset += key.GetHeaderSize(); + RTFString name; ReadBuffer(&name, 1, offset); ReadBuffer(&name, name.GetSize(), offset); - if (std::string_view(name.fData, name.fLName) != kNTupleClassName) { + if (std::string_view(name.fData, name.fLName) != typeName) { offset = offsetNextKey; continue; } offset += name.GetSize(); ReadBuffer(&name, 1, offset); ReadBuffer(&name, name.GetSize(), offset); - if (std::string_view(name.fData, name.fLName) == ntupleName) { - found = true; - break; + if (std::string_view(name.fData, name.fLName) == keyName) { + return key.GetSeekKey(); } offset = offsetNextKey; } - if (!found) { + + // Not found + return 0; +} + +ROOT::Experimental::RResult +ROOT::Experimental::Internal::RMiniFileReader::GetNTupleProper(std::string_view ntuplePath) +{ + RTFHeader fileHeader; + ReadBuffer(&fileHeader, sizeof(fileHeader), 0); + + RTFKey key; + RTFString name; + ReadBuffer(&key, sizeof(key), fileHeader.fBEGIN); + // Skip over the entire key length, including the class name, object name, and title stored in it. + std::uint64_t offset = fileHeader.fBEGIN + key.fKeyLen; + // Skip over the name and title of the TNamed preceding the TFile (root TDirectory) entry. + ReadBuffer(&name, 1, offset); + offset += name.GetSize(); + ReadBuffer(&name, 1, offset); + offset += name.GetSize(); + + // split ntupleName by '/' character to open datasets in subdirectories. + std::string ntuplePathTail(ntuplePath); + if (!ntuplePathTail.empty() && ntuplePathTail[0] == '/') + ntuplePathTail = ntuplePathTail.substr(1); + auto pos = std::string::npos; + while ((pos = ntuplePathTail.find('/')) != std::string::npos) { + auto directoryName = ntuplePathTail.substr(0, pos); + ntuplePathTail.erase(0, pos + 1); + + offset = SearchInDirectory(offset, directoryName, "TDirectory"); + if (offset == 0) { + return R__FAIL("no directory named '" + std::string(directoryName) + "' in file '" + fRawFile->GetUrl() + "'"); + } + ReadBuffer(&key, sizeof(key), offset); + offset = key.GetSeekKey() + key.fKeyLen; + } + // no more '/' delimiter in ntuplePath + auto ntupleName = ntuplePathTail; + + offset = SearchInDirectory(offset, ntupleName, kNTupleClassName); + if (offset == 0) { return R__FAIL("no RNTuple named '" + std::string(ntupleName) + "' in file '" + fRawFile->GetUrl() + "'"); } + ReadBuffer(&key, sizeof(key), offset); offset = key.GetSeekKey() + key.fKeyLen; // size of a RTFNTuple version 2 (min supported version); future anchor versions can grow. 
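For context, a minimal usage sketch of the directory-aware lookup implemented above, combined with the TDirectory overloads added later in this patch; the file name `data.root`, directory `subdir`, and tuple name `ntpl` are illustrative only:

#include <ROOT/RNTupleModel.hxx>
#include <ROOT/RNTupleReader.hxx>
#include <ROOT/RNTupleWriter.hxx>
#include <TFile.h>
#include <memory>

void WriteAndReadInSubdirectory()
{
   using namespace ROOT::Experimental;

   auto model = RNTupleModel::Create();
   *model->MakeField<float>("pt") = 137.0f;
   {
      std::unique_ptr<TFile> file(TFile::Open("data.root", "RECREATE"));
      auto *dir = file->mkdir("subdir");
      // Uses the RNTupleWriter::Append(model, name, TDirectory&) overload introduced below.
      auto writer = RNTupleWriter::Append(std::move(model), "ntpl", *dir);
      writer->Fill();
   } // the writer goes out of scope here and commits the dataset
   // The slash-separated path is resolved via RMiniFileReader::SearchInDirectory() above.
   auto reader = RNTupleReader::Open("subdir/ntpl", "data.root");
   reader->LoadEntry(0);
}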
@@ -1002,9 +1037,8 @@ std::uint64_t ROOT::Experimental::Internal::RNTupleFileWriter::RFileSimple::Writ void ROOT::Experimental::Internal::RNTupleFileWriter::RFileProper::Write(const void *buffer, size_t nbytes, std::int64_t offset) { - R__ASSERT(fFile); - fFile->Seek(offset); - bool rv = fFile->WriteBuffer((char *)(buffer), nbytes); + fDirectory->GetFile()->Seek(offset); + bool rv = fDirectory->GetFile()->WriteBuffer((char *)(buffer), nbytes); if (rv) throw RException(R__FAIL("WriteBuffer failed.")); } @@ -1013,7 +1047,7 @@ std::uint64_t ROOT::Experimental::Internal::RNTupleFileWriter::RFileProper::WriteKey(const void *buffer, size_t nbytes, size_t len) { std::uint64_t offsetKey; - RKeyBlob keyBlob(fFile); + RKeyBlob keyBlob(fDirectory->GetFile()); // Since it is unknown beforehand if offsetKey is beyond the 2GB limit or not, // RKeyBlob will always reserve space for a big key (version >= 1000) keyBlob.Reserve(nbytes, &offsetKey); @@ -1106,11 +1140,16 @@ ROOT::Experimental::Internal::RNTupleFileWriter::Recreate(std::string_view ntupl } std::unique_ptr -ROOT::Experimental::Internal::RNTupleFileWriter::Append(std::string_view ntupleName, TFile &file, +ROOT::Experimental::Internal::RNTupleFileWriter::Append(std::string_view ntupleName, TDirectory &fileOrDirectory, std::uint64_t maxKeySize) { + TFile *file = fileOrDirectory.GetFile(); + if (!file) + throw RException(R__FAIL("invalid attempt to add an RNTuple to a directory that is not backed by a file")); + assert(file->IsBinary()); + auto writer = std::unique_ptr(new RNTupleFileWriter(ntupleName, maxKeySize)); - writer->fFileProper.fFile = &file; + writer->fFileProper.fDirectory = &fileOrDirectory; return writer; } @@ -1124,15 +1163,15 @@ void ROOT::Experimental::Internal::RNTupleFileWriter::Commit() { if (fFileProper) { // Easy case, the ROOT file header and the RNTuple streaming is taken care of by TFile - fFileProper.fFile->WriteObject(&fNTupleAnchor, fNTupleName.c_str()); + fFileProper.fDirectory->WriteObject(&fNTupleAnchor, fNTupleName.c_str()); // Make sure the streamer info records used in the RNTuple are written to the file TBufferFile buf(TBuffer::kWrite); - buf.SetParent(fFileProper.fFile); + buf.SetParent(fFileProper.fDirectory->GetFile()); for (auto [_, info] : fStreamerInfoMap) buf.TagStreamerInfo(info); - fFileProper.fFile->Write(); + fFileProper.fDirectory->GetFile()->Write(); return; } @@ -1428,7 +1467,7 @@ void ROOT::Experimental::Internal::RNTupleFileWriter::WriteTFileSkeleton(int def // First record of the file: the TFile object at offset 100 RTFKey keyRoot(100, 0, strTFile, strFileName, strEmpty, - sizeof(RTFFile) + strFileName.GetSize() + strEmpty.GetSize() + uuid.GetSize()); + sizeof(RTFDirectory) + strFileName.GetSize() + strEmpty.GetSize() + uuid.GetSize()); std::uint32_t nbytesName = keyRoot.fKeyLen + strFileName.GetSize() + 1; fFileSimple.fControlBlock->fFileRecord.fNBytesName = nbytesName; fFileSimple.fControlBlock->fHeader.SetNbytesName(nbytesName); diff --git a/tree/ntuple/v7/src/RNTupleDescriptor.cxx b/tree/ntuple/v7/src/RNTupleDescriptor.cxx index 6c6722cbb1100..94ca004ef79ac 100644 --- a/tree/ntuple/v7/src/RNTupleDescriptor.cxx +++ b/tree/ntuple/v7/src/RNTupleDescriptor.cxx @@ -792,7 +792,7 @@ ROOT::Experimental::Internal::RNTupleDescriptorBuilder::EnsureFieldExists(Descri ROOT::Experimental::RResult ROOT::Experimental::Internal::RNTupleDescriptorBuilder::EnsureValidDescriptor() const { // Reuse field name validity check - auto validName = RFieldBase::EnsureValidFieldName(fDescriptor.GetName()); + auto 
validName = ROOT::Experimental::Internal::EnsureValidNameForRNTuple(fDescriptor.GetName(), "Field"); if (!validName) { return R__FORWARD_ERROR(validName); } @@ -906,10 +906,13 @@ ROOT::Experimental::Internal::RFieldDescriptorBuilder::MakeDescriptor() const } // FieldZero is usually named "" and would be a false positive here if (fField.GetParentId() != kInvalidDescriptorId) { - auto validName = RFieldBase::EnsureValidFieldName(fField.GetFieldName()); + auto validName = ROOT::Experimental::Internal::EnsureValidNameForRNTuple(fField.GetFieldName(), "Field"); if (!validName) { return R__FORWARD_ERROR(validName); } + if (fField.GetFieldName().empty()) { + return R__FAIL("name cannot be empty string \"\""); + } } return fField.Clone(); } diff --git a/tree/ntuple/v7/src/RNTupleDescriptorFmt.cxx b/tree/ntuple/v7/src/RNTupleDescriptorFmt.cxx index 646d9e5033289..21f3c11c05e64 100644 --- a/tree/ntuple/v7/src/RNTupleDescriptorFmt.cxx +++ b/tree/ntuple/v7/src/RNTupleDescriptorFmt.cxx @@ -194,7 +194,7 @@ void ROOT::Experimental::RNTupleDescriptor::PrintInfo(std::ostream &output) cons std::string nameAndType = std::string(" ") + col.fFieldName + " [#" + std::to_string(col.fColumnIndex); if (col.fRepresentationIndex > 0) nameAndType += " / R." + std::to_string(col.fRepresentationIndex); - nameAndType += "] -- " + std::string{Internal::RColumnElementBase::GetTypeName(col.fType)}; + nameAndType += "] -- " + std::string{Internal::RColumnElementBase::GetColumnTypeName(col.fType)}; std::string id = std::string("{id:") + std::to_string(col.fLogicalColumnId) + "}"; if (col.fLogicalColumnId != col.fPhysicalColumnId) id += " --alias--> " + std::to_string(col.fPhysicalColumnId); diff --git a/tree/ntuple/v7/src/RNTupleMerger.cxx b/tree/ntuple/v7/src/RNTupleMerger.cxx index 77f6e52d391a8..e20550ab7579e 100644 --- a/tree/ntuple/v7/src/RNTupleMerger.cxx +++ b/tree/ntuple/v7/src/RNTupleMerger.cxx @@ -78,22 +78,25 @@ try { // pointer we just got. } - // The "fast" option is present if and only if we don't want to change compression. - const int compression = - mergeInfo->fOptions.Contains("fast") ? kUnknownCompressionSettings : outFile->GetCompressionSettings(); - - RNTupleWriteOptions writeOpts; - writeOpts.SetUseBufferedWrite(false); - if (compression != kUnknownCompressionSettings) - writeOpts.SetCompression(compression); - auto destination = std::make_unique(ntupleName, *outFile, writeOpts); - - // If we already have an existing RNTuple, copy over its descriptor to support incremental merging - if (outNTuple) { - auto source = RPageSourceFile::CreateFromAnchor(*outNTuple); - source->Attach(); - auto desc = source->GetSharedDescriptorGuard(); - destination->InitFromDescriptor(desc.GetRef()); + const bool defaultComp = mergeInfo->fOptions.Contains("default_compression"); + const bool firstSrcComp = mergeInfo->fOptions.Contains("first_source_compression"); + if (defaultComp && firstSrcComp) { + // this should never happen through hadd, but a user may call RNTuple::Merge() from custom code... + Warning( + "RNTuple::Merge", + "Passed both options \"default_compression\" and \"first_source_compression\": only the latter will apply."); + } + int compression = kUnknownCompressionSettings; + if (firstSrcComp) { + // user passed -ff or -fk: use the same compression as the first RNTuple we find in the sources. + // (do nothing here, the compression will be fetched below) + } else if (!defaultComp) { + // compression was explicitly passed by the user: use it. 
+ compression = outFile->GetCompressionSettings(); + } else { + // user passed no compression-related options: use default + compression = RCompressionSetting::EDefaults::kUseGeneralPurpose; + Info("RNTuple::Merge", "Using the default compression: %d", compression); } // The remaining entries are the input files @@ -108,7 +111,50 @@ try { inFile->GetName()); return -1; } - sources.push_back(RPageSourceFile::CreateFromAnchor(*anchor)); + + auto source = RPageSourceFile::CreateFromAnchor(*anchor); + if (compression == kUnknownCompressionSettings) { + // Get the compression of this RNTuple and use it as the output compression. + // We currently assume all column ranges have the same compression, so we just peek at the first one. + source->Attach(); + auto descriptor = source->GetSharedDescriptorGuard(); + auto clusterIter = descriptor->GetClusterIterable(); + auto firstCluster = clusterIter.begin(); + if (firstCluster == clusterIter.end()) { + Error("RNTuple::Merge", + "Asked to use the first source's compression as the output compression, but the " + "first source (file '%s') has an empty RNTuple, therefore the output compression could not be " + "determined.", + inFile->GetName()); + return -1; + } + auto colRangeIter = (*firstCluster).GetColumnRangeIterable(); + auto firstColRange = colRangeIter.begin(); + if (firstColRange == colRangeIter.end()) { + Error("RNTuple::Merge", + "Asked to use the first source's compression as the output compression, but the " + "first source (file '%s') has an empty RNTuple, therefore the output compression could not be " + "determined.", + inFile->GetName()); + return -1; + } + compression = (*firstColRange).fCompressionSettings; + Info("RNTuple::Merge", "Using the first RNTuple's compression: %d", compression); + } + sources.push_back(std::move(source)); + } + + RNTupleWriteOptions writeOpts; + assert(compression != kUnknownCompressionSettings); + writeOpts.SetCompression(compression); + auto destination = std::make_unique(ntupleName, *outFile, writeOpts); + + // If we already have an existing RNTuple, copy over its descriptor to support incremental merging + if (outNTuple) { + auto outSource = RPageSourceFile::CreateFromAnchor(*outNTuple); + outSource->Attach(); + auto desc = outSource->GetSharedDescriptorGuard(); + destination->InitFromDescriptor(desc.GetRef()); } // Interface conversion @@ -160,10 +206,17 @@ struct RChangeCompressionFunc { } }; +struct RCommonField { + const RFieldDescriptor *fSrc; + const RFieldDescriptor *fDst; + + RCommonField(const RFieldDescriptor *src, const RFieldDescriptor *dst) : fSrc(src), fDst(dst) {} +}; + struct RDescriptorsComparison { std::vector fExtraDstFields; std::vector fExtraSrcFields; - std::vector fCommonFields; + std::vector fCommonFields; }; struct RColumnOutInfo { @@ -175,15 +228,15 @@ struct RColumnOutInfo { using ColumnIdMap_t = std::unordered_map; struct RColumnInfoGroup { - std::vector fExtraDstColumns; - std::vector fCommonColumns; + std::vector fExtraDstColumns; + std::vector fCommonColumns; }; } // namespace // These structs cannot be in the anon namespace becase they're used in RNTupleMerger's private interface. namespace ROOT::Experimental::Internal { -struct RColumnInfo { +struct RColumnMergeInfo { // This column name is built as a dot-separated concatenation of the ancestry of // the columns' parent fields' names plus the index of the column itself. // e.g. 
"Muon.pt.x._0" @@ -191,6 +244,8 @@ struct RColumnInfo { DescriptorId_t fInputId; DescriptorId_t fOutputId; EColumnType fColumnType; + // If nullopt, use the default in-memory type + std::optional fInMemoryType; const RFieldDescriptor *fParentField; }; @@ -202,7 +257,7 @@ struct RNTupleMergeData { const RNTupleDescriptor &fDstDescriptor; const RNTupleDescriptor *fSrcDescriptor = nullptr; - std::vector fColumns; + std::vector fColumns; ColumnIdMap_t fColumnIdMap; NTupleSize_t fNumDstEntries = 0; @@ -220,8 +275,46 @@ struct RSealedPageMergeData { std::vector fGroups; std::vector> fBuffers; }; + +std::ostream &operator<<(std::ostream &os, const std::optional &x) +{ + if (x) { + os << '(' << x->fMin << ", " << x->fMax << ')'; + } else { + os << "(null)"; + } + return os; +} + } // namespace ROOT::Experimental::Internal +static bool IsSplitOrUnsplitVersionOf(EColumnType a, EColumnType b) +{ + // clang-format off + if (a == EColumnType::kInt16 && b == EColumnType::kSplitInt16) return true; + if (a == EColumnType::kSplitInt16 && b == EColumnType::kInt16) return true; + if (a == EColumnType::kInt32 && b == EColumnType::kSplitInt32) return true; + if (a == EColumnType::kSplitInt32 && b == EColumnType::kInt32) return true; + if (a == EColumnType::kInt64 && b == EColumnType::kSplitInt64) return true; + if (a == EColumnType::kSplitInt64 && b == EColumnType::kInt64) return true; + if (a == EColumnType::kUInt16 && b == EColumnType::kSplitUInt16) return true; + if (a == EColumnType::kSplitUInt16 && b == EColumnType::kUInt16) return true; + if (a == EColumnType::kUInt32 && b == EColumnType::kSplitUInt32) return true; + if (a == EColumnType::kSplitUInt32 && b == EColumnType::kUInt32) return true; + if (a == EColumnType::kUInt64 && b == EColumnType::kSplitUInt64) return true; + if (a == EColumnType::kSplitUInt64 && b == EColumnType::kUInt64) return true; + if (a == EColumnType::kIndex32 && b == EColumnType::kSplitIndex32) return true; + if (a == EColumnType::kSplitIndex32 && b == EColumnType::kIndex32) return true; + if (a == EColumnType::kIndex64 && b == EColumnType::kSplitIndex64) return true; + if (a == EColumnType::kSplitIndex64 && b == EColumnType::kIndex64) return true; + if (a == EColumnType::kReal32 && b == EColumnType::kSplitReal32) return true; + if (a == EColumnType::kSplitReal32 && b == EColumnType::kReal32) return true; + if (a == EColumnType::kReal64 && b == EColumnType::kSplitReal64) return true; + if (a == EColumnType::kSplitReal64 && b == EColumnType::kReal64) return true; + // clang-format on + return false; +} + /// Compares the top level fields of `dst` and `src` and determines whether they can be merged or not. 
/// In addition, returns the differences between `dst` and `src`'s structures static RResult @@ -236,17 +329,13 @@ CompareDescriptorStructure(const RNTupleDescriptor &dst, const RNTupleDescriptor std::vector errors; RDescriptorsComparison res; - struct RCommonField { - const RFieldDescriptor *fDst; - const RFieldDescriptor *fSrc; - }; std::vector commonFields; for (const auto &dstField : dst.GetTopLevelFields()) { const auto srcFieldId = src.FindFieldId(dstField.GetFieldName()); if (srcFieldId != kInvalidDescriptorId) { const auto &srcField = src.GetFieldDescriptor(srcFieldId); - commonFields.push_back({&dstField, &srcField}); + commonFields.push_back({&srcField, &dstField}); } else { res.fExtraDstFields.emplace_back(&dstField); } @@ -276,8 +365,8 @@ CompareDescriptorStructure(const RNTupleDescriptor &dst, const RNTupleDescriptor if (srcName != dstName) { std::stringstream ss; ss << "Field `" << fieldName - << "` is projected to a different field than a previously-seen field with the same name (old: " << dstName - << ", new: " << srcName << ")"; + << "` is projected to a different field than a previously-seen field with the same name (old: " + << dstName << ", new: " << srcName << ")"; errors.push_back(ss.str()); } } @@ -294,6 +383,7 @@ CompareDescriptorStructure(const RNTupleDescriptor &dst, const RNTupleDescriptor errors.push_back(ss.str()); } + // Require that type checksums match const auto srcTyChk = field.fSrc->GetTypeChecksum(); const auto dstTyChk = field.fDst->GetTypeChecksum(); if (srcTyChk && dstTyChk && *srcTyChk != *dstTyChk) { @@ -303,6 +393,7 @@ CompareDescriptorStructure(const RNTupleDescriptor &dst, const RNTupleDescriptor errors.push_back(ss.str()); } + // Require that type versions match const auto srcTyVer = field.fSrc->GetTypeVersion(); const auto dstTyVer = field.fDst->GetTypeVersion(); if (srcTyVer != dstTyVer) { @@ -312,6 +403,60 @@ CompareDescriptorStructure(const RNTupleDescriptor &dst, const RNTupleDescriptor << ", new: " << srcTyVer << ")"; errors.push_back(ss.str()); } + + // Require that column representations match + const auto srcNCols = field.fSrc->GetLogicalColumnIds().size(); + const auto dstNCols = field.fDst->GetLogicalColumnIds().size(); + if (srcNCols != dstNCols) { + std::stringstream ss; + ss << "Field `" << field.fSrc->GetFieldName() + << "` has a different number of columns than previously-seen field with the same name (old: " << dstNCols + << ", new: " << srcNCols << ")"; + errors.push_back(ss.str()); + } else { + for (auto i = 0u; i < srcNCols; ++i) { + const auto srcColId = field.fSrc->GetLogicalColumnIds()[i]; + const auto dstColId = field.fDst->GetLogicalColumnIds()[i]; + const auto &srcCol = src.GetColumnDescriptor(srcColId); + const auto &dstCol = dst.GetColumnDescriptor(dstColId); + // TODO(gparolini): currently we refuse to merge columns of different types unless they are Split/non-Split + // version of the same type, because we know how to treat that specific case. We should also properly handle + // different but compatible types. 
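// Editorial aside (illustrative, not part of the patch): IsSplitOrUnsplitVersionOf()
// above only pairs the split and non-split encodings of one and the same value type,
// e.g.
//   IsSplitOrUnsplitVersionOf(EColumnType::kInt32,  EColumnType::kSplitInt32)  -> true
//   IsSplitOrUnsplitVersionOf(EColumnType::kReal32, EColumnType::kReal64)      -> false
// so the check below still rejects genuinely different column types.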
+ if (srcCol.GetType() != dstCol.GetType() && + !IsSplitOrUnsplitVersionOf(srcCol.GetType(), dstCol.GetType())) { + std::stringstream ss; + ss << i << "-th column of field `" << field.fSrc->GetFieldName() + << "` has a different column type of the same column on the previously-seen field with the same name " + "(old: " + << RColumnElementBase::GetColumnTypeName(srcCol.GetType()) + << ", new: " << RColumnElementBase::GetColumnTypeName(dstCol.GetType()) << ")"; + errors.push_back(ss.str()); + } + if (srcCol.GetBitsOnStorage() != dstCol.GetBitsOnStorage()) { + std::stringstream ss; + ss << i << "-th column of field `" << field.fSrc->GetFieldName() + << "` has a different number of bits of the same column on the previously-seen field with the same " + "name " + "(old: " + << srcCol.GetBitsOnStorage() << ", new: " << dstCol.GetBitsOnStorage() << ")"; + errors.push_back(ss.str()); + } + if (srcCol.GetValueRange() != dstCol.GetValueRange()) { + std::stringstream ss; + ss << i << "-th column of field `" << field.fSrc->GetFieldName() + << "` has a different value range of the same column on the previously-seen field with the same name " + "(old: " + << srcCol.GetValueRange() << ", new: " << dstCol.GetValueRange() << ")"; + errors.push_back(ss.str()); + } + if (srcCol.GetRepresentationIndex() > 0) { + std::stringstream ss; + ss << i << "-th column of field `" << field.fSrc->GetFieldName() + << "` has a representation index higher than 0. This is not supported yet by the merger."; + errors.push_back(ss.str()); + } + } + } } std::string errMsg; @@ -325,8 +470,8 @@ CompareDescriptorStructure(const RNTupleDescriptor &dst, const RNTupleDescriptor return R__FAIL(errMsg); res.fCommonFields.reserve(commonFields.size()); - for (const auto &[_, srcField] : commonFields) { - res.fCommonFields.emplace_back(srcField); + for (const auto &[srcField, dstField] : commonFields) { + res.fCommonFields.emplace_back(srcField, dstField); } // TODO(gparolini): we should exhaustively check the field tree rather than just the top level fields, @@ -337,7 +482,7 @@ CompareDescriptorStructure(const RNTupleDescriptor &dst, const RNTupleDescriptor // Applies late model extension to `destination`, adding all `newFields` to it. static void ExtendDestinationModel(std::span newFields, RNTupleModel &dstModel, - RNTupleMergeData &mergeData) + RNTupleMergeData &mergeData, std::vector &commonFields) { assert(newFields.size() > 0); // no point in calling this with 0 new cols @@ -365,12 +510,20 @@ static void ExtendDestinationModel(std::span newFields } dstModel.Freeze(); mergeData.fDestination.UpdateSchema(changeset, mergeData.fNumDstEntries); + + commonFields.reserve(commonFields.size() + newFields.size()); + for (const auto *field : newFields) { + const auto newFieldInDstId = mergeData.fDstDescriptor.FindFieldId(field->GetFieldName()); + const auto &newFieldInDst = mergeData.fDstDescriptor.GetFieldDescriptor(newFieldInDstId); + commonFields.emplace_back(field, &newFieldInDst); + } } // Merges all columns appearing both in the source and destination RNTuples, just copying them if their // compression matches ("fast merge") or by unsealing and resealing them with the proper compression. 
void RNTupleMerger::MergeCommonColumns(RClusterPool &clusterPool, DescriptorId_t clusterId, - std::span commonColumns, RCluster::ColumnSet_t commonColumnSet, + std::span commonColumns, + const RCluster::ColumnSet_t &commonColumnSet, RSealedPageMergeData &sealedPageData, const RNTupleMergeData &mergeData) { assert(commonColumns.size() == commonColumnSet.size()); @@ -379,7 +532,7 @@ void RNTupleMerger::MergeCommonColumns(RClusterPool &clusterPool, DescriptorId_t const RCluster *cluster = clusterPool.GetCluster(clusterId, commonColumnSet); // we expect the cluster pool to contain the requested set of columns, since they were - // validated by CompareDescriptorStructures(). + // validated by CompareDescriptorStructure(). assert(cluster); const auto &clusterDesc = mergeData.fSrcDescriptor->GetClusterDescriptor(clusterId); @@ -389,8 +542,11 @@ void RNTupleMerger::MergeCommonColumns(RClusterPool &clusterPool, DescriptorId_t R__ASSERT(clusterDesc.ContainsColumn(columnId)); const auto &columnDesc = mergeData.fSrcDescriptor->GetColumnDescriptor(columnId); - const auto srcColElement = RColumnElementBase::Generate(columnDesc.GetType()); - const auto dstColElement = RColumnElementBase::Generate(column.fColumnType); + const auto srcColElement = column.fInMemoryType + ? GenerateColumnElement(*column.fInMemoryType, columnDesc.GetType()) + : RColumnElementBase::Generate(columnDesc.GetType()); + const auto dstColElement = column.fInMemoryType ? GenerateColumnElement(*column.fInMemoryType, column.fColumnType) + : RColumnElementBase::Generate(column.fColumnType); // Now get the pages for this column in this cluster const auto &pages = clusterDesc.GetPageRange(columnId); @@ -400,17 +556,15 @@ void RNTupleMerger::MergeCommonColumns(RClusterPool &clusterPool, DescriptorId_t // Each column range potentially has a distinct compression settings const auto colRangeCompressionSettings = clusterDesc.GetColumnRange(columnId).fCompressionSettings; - const bool needsCompressionChange = mergeData.fMergeOpts.fCompressionSettings != kUnknownCompressionSettings && - colRangeCompressionSettings != mergeData.fMergeOpts.fCompressionSettings; - + const bool needsCompressionChange = colRangeCompressionSettings != mergeData.fMergeOpts.fCompressionSettings; if (needsCompressionChange && mergeData.fMergeOpts.fExtraVerbose) Info("RNTuple::Merge", "Column %s: changing source compression from %d to %d", column.fColumnName.c_str(), colRangeCompressionSettings, mergeData.fMergeOpts.fCompressionSettings); - // If the column range is already uncompressed we don't need to allocate any new buffer, so we don't - // bother reserving memory for them. size_t pageBufferBaseIdx = sealedPageData.fBuffers.size(); - if (colRangeCompressionSettings != 0) + // If the column range already has the right compression we don't need to allocate any new buffer, so we don't + // bother reserving memory for them. + if (needsCompressionChange) sealedPageData.fBuffers.resize(sealedPageData.fBuffers.size() + pages.fPageInfos.size()); // Loop over the pages @@ -462,7 +616,7 @@ void RNTupleMerger::MergeCommonColumns(RClusterPool &clusterPool, DescriptorId_t // Generates default values for columns that are not present in the current source RNTuple // but are present in the destination's schema. 
-static void GenerateExtraDstColumns(size_t nClusterEntries, std::span extraDstColumns, +static void GenerateExtraDstColumns(size_t nClusterEntries, std::span extraDstColumns, RSealedPageMergeData &sealedPageData, const RNTupleMergeData &mergeData) { for (const auto &column : extraDstColumns) { @@ -535,8 +689,8 @@ static void GenerateExtraDstColumns(size_t nClusterEntries, std::span commonColumns, - std::span extraDstColumns, RNTupleMergeData &mergeData) +void RNTupleMerger::MergeSourceClusters(RPageSource &source, std::span commonColumns, + std::span extraDstColumns, RNTupleMergeData &mergeData) { RClusterPool clusterPool{source}; @@ -587,26 +741,77 @@ void RNTupleMerger::MergeSourceClusters(RPageSource &source, std::span ColumnInMemoryType(std::string_view fieldType, EColumnType onDiskType) +{ + if (onDiskType == EColumnType::kIndex32 || onDiskType == EColumnType::kSplitIndex32 || + onDiskType == EColumnType::kIndex64 || onDiskType == EColumnType::kSplitIndex64) + return typeid(ClusterSize_t); + + if (onDiskType == EColumnType::kSwitch) + return typeid(ROOT::Experimental::RColumnSwitch); + + if (fieldType == "bool") { + return typeid(bool); + } else if (fieldType == "std::byte") { + return typeid(std::byte); + } else if (fieldType == "char") { + return typeid(char); + } else if (fieldType == "std::int8_t") { + return typeid(std::int8_t); + } else if (fieldType == "std::uint8_t") { + return typeid(std::uint8_t); + } else if (fieldType == "std::int16_t") { + return typeid(std::int16_t); + } else if (fieldType == "std::uint16_t") { + return typeid(std::uint16_t); + } else if (fieldType == "std::int32_t") { + return typeid(std::int32_t); + } else if (fieldType == "std::uint32_t") { + return typeid(std::uint32_t); + } else if (fieldType == "std::int64_t") { + return typeid(std::int64_t); + } else if (fieldType == "std::uint64_t") { + return typeid(std::uint64_t); + } else if (fieldType == "float") { + return typeid(float); + } else if (fieldType == "double") { + return typeid(double); + } + + // if the type is not one of those above, we use the default in-memory type. + return std::nullopt; +} + // Given a field, fill `columns` and `colIdMap` with information about all columns belonging to it and its subfields. // `colIdMap` is used to map matching columns from different sources to the same output column in the destination. // We match columns by their "fully qualified name", which is the concatenation of their ancestor fields' names // and the column index. -// By this point, since we called `CompareDescriptorStructures()` earlier, we should be guaranteed that two matching +// By this point, since we called `CompareDescriptorStructure()` earlier, we should be guaranteed that two matching // columns will have at least compatible representations. -static void AddColumnsFromField(std::vector &columns, const RNTupleDescriptor &srcDesc, - RNTupleMergeData &mergeData, const RFieldDescriptor &fieldDesc, - const std::string &prefix = "") +// NOTE: srcFieldDesc and dstFieldDesc may alias. +static void AddColumnsFromField(std::vector &columns, const RNTupleDescriptor &srcDesc, + RNTupleMergeData &mergeData, const RFieldDescriptor &srcFieldDesc, + const RFieldDescriptor &dstFieldDesc, const std::string &prefix = "") { - std::string name = prefix + '.' + fieldDesc.GetFieldName(); + std::string name = prefix + '.' 
+ srcFieldDesc.GetFieldName(); - const auto &columnIds = fieldDesc.GetLogicalColumnIds(); + const auto &columnIds = srcFieldDesc.GetLogicalColumnIds(); columns.reserve(columns.size() + columnIds.size()); - for (const auto &columnId : columnIds) { - const auto &srcColumn = srcDesc.GetColumnDescriptor(columnId); - RColumnInfo info; + // NOTE: here we can match the src and dst columns by column index because we forbid merging fields with + // different column representations. + for (auto i = 0u; i < srcFieldDesc.GetLogicalColumnIds().size(); ++i) { + // We don't want to try and merge alias columns + if (srcFieldDesc.IsProjectedField()) + continue; + + auto srcColumnId = srcFieldDesc.GetLogicalColumnIds()[i]; + const auto &srcColumn = srcDesc.GetColumnDescriptor(srcColumnId); + RColumnMergeInfo info{}; info.fColumnName = name + '.' + std::to_string(srcColumn.GetIndex()); - info.fInputId = columnId; - info.fParentField = &fieldDesc; + info.fInputId = srcColumn.GetPhysicalId(); + // Since the parent field is only relevant for extra dst columns, the choice of src or dstFieldDesc as a parent + // is arbitrary (they're the same field). + info.fParentField = &dstFieldDesc; if (auto it = mergeData.fColumnIdMap.find(info.fColumnName); it != mergeData.fColumnIdMap.end()) { info.fOutputId = it->second.fColumnId; @@ -620,17 +825,35 @@ static void AddColumnsFromField(std::vector &columns, const RNTuple // because even in that case their column representations may differ. // e.g. if the destination has a different compression than the source, an integer column might be // zigzag-encoded in the source but not in the destination. - const auto &dstColumn = (&mergeData.fDstDescriptor == &srcDesc) - ? srcColumn - : mergeData.fDstDescriptor.GetColumnDescriptor(columnId); + auto dstColumnId = dstFieldDesc.GetLogicalColumnIds()[i]; + const auto &dstColumn = mergeData.fDstDescriptor.GetColumnDescriptor(dstColumnId); info.fColumnType = dstColumn.GetType(); mergeData.fColumnIdMap[info.fColumnName] = {info.fOutputId, info.fColumnType}; } + + if (mergeData.fMergeOpts.fExtraVerbose) { + Info("RNTuple::Merge", + "Adding column %s with log.id %" PRIu64 ", phys.id %" PRIu64 ", type %s " + " -> log.id %" PRIu64 ", type %s", + info.fColumnName.c_str(), srcColumnId, srcColumn.GetPhysicalId(), + RColumnElementBase::GetColumnTypeName(srcColumn.GetType()), info.fOutputId, + RColumnElementBase::GetColumnTypeName(info.fColumnType)); + } + + // Since we disallow merging fields of different types, src and dstFieldDesc must have the same type name. + assert(srcFieldDesc.GetTypeName() == dstFieldDesc.GetTypeName()); + info.fInMemoryType = ColumnInMemoryType(srcFieldDesc.GetTypeName(), info.fColumnType); columns.emplace_back(info); } - for (const auto &field : srcDesc.GetFieldIterable(fieldDesc)) - AddColumnsFromField(columns, srcDesc, mergeData, field, name); + const auto &srcChildrenIds = srcFieldDesc.GetLinkIds(); + const auto &dstChildrenIds = dstFieldDesc.GetLinkIds(); + assert(srcChildrenIds.size() == dstChildrenIds.size()); + for (auto i = 0u; i < srcChildrenIds.size(); ++i) { + const auto &srcChild = srcDesc.GetFieldDescriptor(srcChildrenIds[i]); + const auto &dstChild = mergeData.fDstDescriptor.GetFieldDescriptor(dstChildrenIds[i]); + AddColumnsFromField(columns, srcDesc, mergeData, srcChild, dstChild, name); + } } // Converts the fields comparison data to the corresponding column information. 
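As an aside, a small sketch of how a caller configures the options that the comparison and column-gathering helpers above serve; the namespace qualification is assumed from RNTupleMerger.hxx on this branch, while the member and enumerator names are the ones used in this patch:

#include <ROOT/RNTupleMerger.hxx>

using namespace ROOT::Experimental;           // namespace qualification assumed,
using namespace ROOT::Experimental::Internal; // see RNTupleMerger.hxx on this branch

RNTupleMergeOptions MakeUnionMergeOptions()
{
   RNTupleMergeOptions opts;
   // Union mode lets ExtendDestinationModel() add fields that only later sources carry.
   opts.fMergingMode = ENTupleMergingMode::kUnion;
   // fCompressionSettings is not forced here: per the change to Merge() below, the merger
   // then adopts the destination sink's compression, and an explicit value that disagrees
   // with the sink is rejected.
   // Report the per-column mapping via Info("RNTuple::Merge", ...), as added above.
   opts.fExtraVerbose = true;
   return opts;
}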
@@ -641,10 +864,10 @@ GatherColumnInfos(const RDescriptorsComparison &descCmp, const RNTupleDescriptor { RColumnInfoGroup res; for (const RFieldDescriptor *field : descCmp.fExtraDstFields) { - AddColumnsFromField(res.fExtraDstColumns, mergeData.fDstDescriptor, mergeData, *field); + AddColumnsFromField(res.fExtraDstColumns, mergeData.fDstDescriptor, mergeData, *field, *field); } - for (const auto *field : descCmp.fCommonFields) { - AddColumnsFromField(res.fCommonColumns, srcDesc, mergeData, *field); + for (const auto &[srcField, dstField] : descCmp.fCommonFields) { + AddColumnsFromField(res.fCommonColumns, srcDesc, mergeData, *srcField, *dstField); } return res; } @@ -661,8 +884,21 @@ RNTupleMerger::RNTupleMerger() } RResult -RNTupleMerger::Merge(std::span sources, RPageSink &destination, const RNTupleMergeOptions &mergeOpts) +RNTupleMerger::Merge(std::span sources, RPageSink &destination, const RNTupleMergeOptions &mergeOptsIn) { + RNTupleMergeOptions mergeOpts = mergeOptsIn; + { + const auto dstCompSettings = destination.GetWriteOptions().GetCompression(); + if (mergeOpts.fCompressionSettings == kUnknownCompressionSettings) { + mergeOpts.fCompressionSettings = dstCompSettings; + } else if (mergeOpts.fCompressionSettings != dstCompSettings) { + return R__FAIL(std::string("The compression given to RNTupleMergeOptions is different from that of the " + "sink! (opts: ") + + std::to_string(mergeOpts.fCompressionSettings) + ", sink: " + std::to_string(dstCompSettings) + + ") This is currently unsupported."); + } + } + RNTupleMergeData mergeData{sources, destination, mergeOpts}; std::unique_ptr model; // used to initialize the schema of the output RNTuple @@ -685,7 +921,9 @@ RNTupleMerger::Merge(std::span sources, RPageSink &destination, c // Create sink from the input model if not initialized if (!destination.IsInitialized()) { - model = srcDescriptor->CreateModel(); + auto opts = RNTupleDescriptor::RCreateModelOptions(); + opts.fReconstructProjections = true; + model = srcDescriptor->CreateModel(opts); destination.Init(*model); } @@ -714,9 +952,7 @@ RNTupleMerger::Merge(std::span sources, RPageSink &destination, c if (descCmp.fExtraSrcFields.size()) { if (mergeOpts.fMergingMode == ENTupleMergingMode::kUnion) { // late model extension for all fExtraSrcFields in Union mode - ExtendDestinationModel(descCmp.fExtraSrcFields, *model, mergeData); - descCmp.fCommonFields.insert(descCmp.fCommonFields.end(), descCmp.fExtraSrcFields.begin(), - descCmp.fExtraSrcFields.end()); + ExtendDestinationModel(descCmp.fExtraSrcFields, *model, mergeData, descCmp.fCommonFields); } else if (mergeOpts.fMergingMode == ENTupleMergingMode::kStrict) { // If the current source has extra fields and we're in Strict mode, error std::string msg = "Source RNTuple has extra fields that the destination RNTuple doesn't have:"; diff --git a/tree/ntuple/v7/src/RNTupleModel.cxx b/tree/ntuple/v7/src/RNTupleModel.cxx index 03e8ec1d8265e..3116f8b058517 100644 --- a/tree/ntuple/v7/src/RNTupleModel.cxx +++ b/tree/ntuple/v7/src/RNTupleModel.cxx @@ -208,10 +208,13 @@ ROOT::Experimental::RNTupleModel::RUpdater::AddProjectedField(std::unique_ptr nameValid = RFieldBase::EnsureValidFieldName(fieldName); + RResult nameValid = ROOT::Experimental::Internal::EnsureValidNameForRNTuple(fieldName, "Field"); if (!nameValid) { nameValid.Throw(); } + if (fieldName.empty()) { + throw RException(R__FAIL("name cannot be empty string \"\"")); + } auto fieldNameStr = std::string(fieldName); if (fFieldNames.count(fieldNameStr) > 0) throw 
RException(R__FAIL("field name '" + fieldNameStr + "' already exists in NTuple model")); diff --git a/tree/ntuple/v7/src/RNTupleParallelWriter.cxx b/tree/ntuple/v7/src/RNTupleParallelWriter.cxx index 21fc0b89f92fc..c018002f77882 100644 --- a/tree/ntuple/v7/src/RNTupleParallelWriter.cxx +++ b/tree/ntuple/v7/src/RNTupleParallelWriter.cxx @@ -159,13 +159,13 @@ ROOT::Experimental::RNTupleParallelWriter::Recreate(std::unique_ptr ROOT::Experimental::RNTupleParallelWriter::Append(std::unique_ptr model, std::string_view ntupleName, - TFile &file, const RNTupleWriteOptions &options) + TDirectory &fileOrDirectory, const RNTupleWriteOptions &options) { if (!options.GetUseBufferedWrite()) { throw RException(R__FAIL("parallel writing requires buffering")); } - auto sink = std::make_unique(ntupleName, file, options); + auto sink = std::make_unique(ntupleName, fileOrDirectory, options); // Cannot use std::make_unique because the constructor of RNTupleParallelWriter is private. return std::unique_ptr(new RNTupleParallelWriter(std::move(model), std::move(sink))); } diff --git a/tree/ntuple/v7/src/RNTupleUtil.cxx b/tree/ntuple/v7/src/RNTupleUtil.cxx index aab1d2e331021..c8f96b347e06a 100644 --- a/tree/ntuple/v7/src/RNTupleUtil.cxx +++ b/tree/ntuple/v7/src/RNTupleUtil.cxx @@ -2,6 +2,8 @@ /// \ingroup NTuple ROOT7 /// \author Jakob Blomer & Max Orok /// \date 2020-07-14 +/// \author Vincenzo Eduardo Padulano, CERN +/// \date 2024-11-08 /// \warning This is part of the ROOT 7 prototype! It will change without notice. It might trigger earthquakes. Feedback /// is welcome! @@ -18,6 +20,8 @@ #include "ROOT/RLogger.hxx" #include "ROOT/RMiniFile.hxx" +#include +#include #include #include @@ -25,3 +29,25 @@ ROOT::Experimental::RLogChannel &ROOT::Experimental::NTupleLog() { static RLogChannel sLog("ROOT.NTuple"); return sLog; } + +ROOT::Experimental::RResult +ROOT::Experimental::Internal::EnsureValidNameForRNTuple(std::string_view name, std::string_view where) +{ + using codeAndRepr = std::pair; + constexpr static std::array forbiddenChars{codeAndRepr{"\u002E", "."}, codeAndRepr{"\u002F", "/"}, + codeAndRepr{"\u0020", "space"}, + codeAndRepr{"\u005C", "\\"}}; + + for (auto &&[code, repr] : forbiddenChars) { + if (name.find(code) != std::string_view::npos) + return R__FAIL(std::string(where) + " name '" + std::string(name) + "' cannot contain character '" + repr + + "'."); + } + + if (std::count_if(name.begin(), name.end(), [](unsigned char c) { return std::iscntrl(c); })) + return R__FAIL(std::string(where) + " name '" + std::string(name) + + "' cannot contain character classified as control character. 
These notably include newline, tab, " + "carriage return."); + + return RResult::Success(); +} diff --git a/tree/ntuple/v7/src/RNTupleView.cxx b/tree/ntuple/v7/src/RNTupleView.cxx index b0b9212efa78f..1318669c7e44d 100644 --- a/tree/ntuple/v7/src/RNTupleView.cxx +++ b/tree/ntuple/v7/src/RNTupleView.cxx @@ -39,8 +39,7 @@ ROOT::Experimental::Internal::GetFieldRange(const RFieldBase &field, const RPage } if (columnId == kInvalidDescriptorId) { - throw RException(R__FAIL("field iteration over empty fields is unsupported: " + - desc.GetQualifiedFieldName(field.GetOnDiskId()))); + return RNTupleGlobalRange(kInvalidNTupleIndex, kInvalidNTupleIndex); } auto arraySize = std::max(std::uint64_t(1), desc.GetFieldDescriptor(field.GetOnDiskId()).GetNRepetitions()); diff --git a/tree/ntuple/v7/src/RNTupleWriter.cxx b/tree/ntuple/v7/src/RNTupleWriter.cxx index 45b69cc6be2bb..7e6e45ef897c4 100644 --- a/tree/ntuple/v7/src/RNTupleWriter.cxx +++ b/tree/ntuple/v7/src/RNTupleWriter.cxx @@ -25,6 +25,7 @@ #include #include +#include #include #include @@ -92,10 +93,20 @@ ROOT::Experimental::RNTupleWriter::Recreate(std::initializer_list -ROOT::Experimental::RNTupleWriter::Append(std::unique_ptr model, std::string_view ntupleName, TFile &file, - const RNTupleWriteOptions &options) +ROOT::Experimental::RNTupleWriter::Append(std::unique_ptr model, std::string_view ntupleName, + TDirectory &fileOrDirectory, const RNTupleWriteOptions &options) { - auto sink = std::make_unique(ntupleName, file, options); + auto file = fileOrDirectory.GetFile(); + if (!file) { + throw RException(R__FAIL("RNTupleWriter only supports writing to a ROOT file. Cannot write into a directory " + "that is not backed by a file")); + } + if (!file->IsBinary()) { + throw RException(R__FAIL("RNTupleWriter only supports writing to a ROOT file. 
Cannot write into " + + std::string(file->GetName()))); + } + + auto sink = std::make_unique(ntupleName, fileOrDirectory, options); return Create(std::move(model), std::move(sink), options); } diff --git a/tree/ntuple/v7/src/RPagePool.cxx b/tree/ntuple/v7/src/RPagePool.cxx index 8403fab773f35..5f14aeae15cfb 100644 --- a/tree/ntuple/v7/src/RPagePool.cxx +++ b/tree/ntuple/v7/src/RPagePool.cxx @@ -21,18 +21,21 @@ #include #include -ROOT::Experimental::Internal::RPageRef ROOT::Experimental::Internal::RPagePool::RegisterPage(RPage page) +ROOT::Experimental::Internal::RPageRef +ROOT::Experimental::Internal::RPagePool::RegisterPage(RPage page, std::type_index inMemoryType) { std::lock_guard lockGuard(fLock); fPages.emplace_back(std::move(page)); + fPageInfos.emplace_back(RPageInfo{inMemoryType}); fReferences.emplace_back(1); return RPageRef(page, this); } -void ROOT::Experimental::Internal::RPagePool::PreloadPage(RPage page) +void ROOT::Experimental::Internal::RPagePool::PreloadPage(RPage page, std::type_index inMemoryType) { std::lock_guard lockGuard(fLock); fPages.emplace_back(std::move(page)); + fPageInfos.emplace_back(RPageInfo{inMemoryType}); fReferences.emplace_back(0); } @@ -47,8 +50,10 @@ void ROOT::Experimental::Internal::RPagePool::ReleasePage(const RPage &page) if (--fReferences[i] == 0) { fPages[i] = std::move(fPages[N - 1]); + fPageInfos[i] = fPageInfos[N - 1]; fReferences[i] = fReferences[N - 1]; - fPages.resize(N-1); + fPages.resize(N - 1); + fPageInfos.resize(N - 1); fReferences.resize(N - 1); } return; @@ -56,14 +61,17 @@ void ROOT::Experimental::Internal::RPagePool::ReleasePage(const RPage &page) R__ASSERT(false); } -ROOT::Experimental::Internal::RPageRef -ROOT::Experimental::Internal::RPagePool::GetPage(ColumnId_t columnId, NTupleSize_t globalIndex) +ROOT::Experimental::Internal::RPageRef ROOT::Experimental::Internal::RPagePool::GetPage(ColumnId_t columnId, + std::type_index inMemoryType, + NTupleSize_t globalIndex) { std::lock_guard lockGuard(fLock); unsigned int N = fPages.size(); for (unsigned int i = 0; i < N; ++i) { if (fReferences[i] < 0) continue; if (fPages[i].GetColumnId() != columnId) continue; + if (fPageInfos[i].fInMemoryType != inMemoryType) + continue; if (!fPages[i].Contains(globalIndex)) continue; fReferences[i]++; return RPageRef(fPages[i], this); @@ -71,14 +79,17 @@ ROOT::Experimental::Internal::RPagePool::GetPage(ColumnId_t columnId, NTupleSize return RPageRef(); } -ROOT::Experimental::Internal::RPageRef -ROOT::Experimental::Internal::RPagePool::GetPage(ColumnId_t columnId, RClusterIndex clusterIndex) +ROOT::Experimental::Internal::RPageRef ROOT::Experimental::Internal::RPagePool::GetPage(ColumnId_t columnId, + std::type_index inMemoryType, + RClusterIndex clusterIndex) { std::lock_guard lockGuard(fLock); unsigned int N = fPages.size(); for (unsigned int i = 0; i < N; ++i) { if (fReferences[i] < 0) continue; if (fPages[i].GetColumnId() != columnId) continue; + if (fPageInfos[i].fInMemoryType != inMemoryType) + continue; if (!fPages[i].Contains(clusterIndex)) continue; fReferences[i]++; return RPageRef(fPages[i], this); diff --git a/tree/ntuple/v7/src/RPageStorage.cxx b/tree/ntuple/v7/src/RPageStorage.cxx index 2fbbb364a721d..97796d6588d2d 100644 --- a/tree/ntuple/v7/src/RPageStorage.cxx +++ b/tree/ntuple/v7/src/RPageStorage.cxx @@ -84,28 +84,36 @@ ROOT::Experimental::RResult ROOT::Experimental::Internal::RPageSt //------------------------------------------------------------------------------ -void 
ROOT::Experimental::Internal::RPageSource::RActivePhysicalColumns::Insert(DescriptorId_t physicalColumnID) +void ROOT::Experimental::Internal::RPageSource::RActivePhysicalColumns::Insert( + DescriptorId_t physicalColumnId, RColumnElementBase::RIdentifier elementId) { - for (unsigned i = 0; i < fIDs.size(); ++i) { - if (fIDs[i] == physicalColumnID) { - fRefCounters[i]++; + auto [itr, _] = fColumnInfos.emplace(physicalColumnId, std::vector()); + for (auto &columnInfo : itr->second) { + if (columnInfo.fElementId == elementId) { + columnInfo.fRefCounter++; return; } } - fIDs.emplace_back(physicalColumnID); - fRefCounters.emplace_back(1); + itr->second.emplace_back(RColumnInfo{elementId, 1}); } -void ROOT::Experimental::Internal::RPageSource::RActivePhysicalColumns::Erase(DescriptorId_t physicalColumnID) +void ROOT::Experimental::Internal::RPageSource::RActivePhysicalColumns::Erase(DescriptorId_t physicalColumnId, + RColumnElementBase::RIdentifier elementId) { - for (unsigned i = 0; i < fIDs.size(); ++i) { - if (fIDs[i] == physicalColumnID) { - if (--fRefCounters[i] == 0) { - fIDs.erase(fIDs.begin() + i); - fRefCounters.erase(fRefCounters.begin() + i); + auto itr = fColumnInfos.find(physicalColumnId); + R__ASSERT(itr != fColumnInfos.end()); + for (std::size_t i = 0; i < itr->second.size(); ++i) { + if (itr->second[i].fElementId != elementId) + continue; + + itr->second[i].fRefCounter--; + if (itr->second[i].fRefCounter == 0) { + itr->second.erase(itr->second.begin() + i); + if (itr->second.empty()) { + fColumnInfos.erase(itr); } - return; } + break; } } @@ -113,8 +121,8 @@ ROOT::Experimental::Internal::RCluster::ColumnSet_t ROOT::Experimental::Internal::RPageSource::RActivePhysicalColumns::ToColumnSet() const { RCluster::ColumnSet_t result; - for (const auto &id : fIDs) - result.insert(id); + for (const auto &[physicalColumnId, _] : fColumnInfos) + result.insert(physicalColumnId); return result; } @@ -168,13 +176,13 @@ ROOT::Experimental::Internal::RPageSource::AddColumn(DescriptorId_t fieldId, RCo auto physicalId = GetSharedDescriptorGuard()->FindPhysicalColumnId(fieldId, column.GetIndex(), column.GetRepresentationIndex()); R__ASSERT(physicalId != kInvalidDescriptorId); - fActivePhysicalColumns.Insert(physicalId); + fActivePhysicalColumns.Insert(physicalId, column.GetElement()->GetIdentifier()); return ColumnHandle_t{physicalId, &column}; } void ROOT::Experimental::Internal::RPageSource::DropColumn(ColumnHandle_t columnHandle) { - fActivePhysicalColumns.Erase(columnHandle.fPhysicalId); + fActivePhysicalColumns.Erase(columnHandle.fPhysicalId, columnHandle.fColumn->GetElement()->GetIdentifier()); } void ROOT::Experimental::Internal::RPageSource::SetEntryRange(const REntryRange &range) @@ -235,52 +243,58 @@ void ROOT::Experimental::Internal::RPageSource::UnzipClusterImpl(RCluster *clust auto descriptorGuard = GetSharedDescriptorGuard(); const auto &clusterDescriptor = descriptorGuard->GetClusterDescriptor(clusterId); - std::vector> allElements; - std::atomic foundChecksumFailure{false}; + std::vector> allElements; const auto &columnsInCluster = cluster->GetAvailPhysicalColumns(); for (const auto columnId : columnsInCluster) { - const auto &columnDesc = descriptorGuard->GetColumnDescriptor(columnId); - - allElements.emplace_back(RColumnElementBase::Generate(columnDesc.GetType())); - - const auto &pageRange = clusterDescriptor.GetPageRange(columnId); - std::uint64_t pageNo = 0; - std::uint64_t firstInPage = 0; - for (const auto &pi : pageRange.fPageInfos) { - ROnDiskPage::Key key(columnId, pageNo); 
- auto onDiskPage = cluster->GetOnDiskPage(key); - RSealedPage sealedPage; - sealedPage.SetNElements(pi.fNElements); - sealedPage.SetHasChecksum(pi.fHasChecksum); - sealedPage.SetBufferSize(pi.fLocator.fBytesOnStorage + pi.fHasChecksum * kNBytesPageChecksum); - sealedPage.SetBuffer(onDiskPage->GetAddress()); - R__ASSERT(onDiskPage && (onDiskPage->GetSize() == sealedPage.GetBufferSize())); - - auto taskFunc = [this, columnId, clusterId, firstInPage, sealedPage, element = allElements.back().get(), - &foundChecksumFailure, - indexOffset = clusterDescriptor.GetColumnRange(columnId).fFirstElementIndex]() { - auto rv = UnsealPage(sealedPage, *element, columnId); - if (!rv) { - foundChecksumFailure = true; - return; - } - auto newPage = rv.Unwrap(); - fCounters->fSzUnzip.Add(element->GetSize() * sealedPage.GetNElements()); + // By the time we unzip a cluster, the set of active columns may have already changed wrt. to the moment when + // we requested reading the cluster. That doesn't matter much, we simply decompress what is now in the list + // of active columns. + if (!fActivePhysicalColumns.HasColumnInfos(columnId)) + continue; + const auto &columnInfos = fActivePhysicalColumns.GetColumnInfos(columnId); + + for (const auto &info : columnInfos) { + allElements.emplace_back(GenerateColumnElement(info.fElementId)); + + const auto &pageRange = clusterDescriptor.GetPageRange(columnId); + std::uint64_t pageNo = 0; + std::uint64_t firstInPage = 0; + for (const auto &pi : pageRange.fPageInfos) { + ROnDiskPage::Key key(columnId, pageNo); + auto onDiskPage = cluster->GetOnDiskPage(key); + RSealedPage sealedPage; + sealedPage.SetNElements(pi.fNElements); + sealedPage.SetHasChecksum(pi.fHasChecksum); + sealedPage.SetBufferSize(pi.fLocator.fBytesOnStorage + pi.fHasChecksum * kNBytesPageChecksum); + sealedPage.SetBuffer(onDiskPage->GetAddress()); + R__ASSERT(onDiskPage && (onDiskPage->GetSize() == sealedPage.GetBufferSize())); + + auto taskFunc = [this, columnId, clusterId, firstInPage, sealedPage, element = allElements.back().get(), + &foundChecksumFailure, + indexOffset = clusterDescriptor.GetColumnRange(columnId).fFirstElementIndex]() { + auto rv = UnsealPage(sealedPage, *element, columnId); + if (!rv) { + foundChecksumFailure = true; + return; + } + auto newPage = rv.Unwrap(); + fCounters->fSzUnzip.Add(element->GetSize() * sealedPage.GetNElements()); - newPage.SetWindow(indexOffset + firstInPage, RPage::RClusterInfo(clusterId, indexOffset)); - fPagePool.PreloadPage(std::move(newPage)); - }; + newPage.SetWindow(indexOffset + firstInPage, RPage::RClusterInfo(clusterId, indexOffset)); + fPagePool.PreloadPage(std::move(newPage), element->GetIdentifier().fInMemoryType); + }; - fTaskScheduler->AddTask(taskFunc); + fTaskScheduler->AddTask(taskFunc); - firstInPage += pi.fNElements; - pageNo++; - } // for all pages in column - } // for all columns in cluster + firstInPage += pi.fNElements; + pageNo++; + } // for all pages in column - fCounters->fNPageUnsealed.Add(cluster->GetNOnDiskPages()); + fCounters->fNPageUnsealed.Add(pageNo); + } // for all in-memory types of the column + } // for all columns in cluster fTaskScheduler->Wait(); @@ -319,7 +333,8 @@ ROOT::Experimental::Internal::RPageRef ROOT::Experimental::Internal::RPageSource::LoadPage(ColumnHandle_t columnHandle, NTupleSize_t globalIndex) { const auto columnId = columnHandle.fPhysicalId; - auto cachedPageRef = fPagePool.GetPage(columnId, globalIndex); + const auto columnElementId = columnHandle.fColumn->GetElement()->GetIdentifier(); + auto cachedPageRef 
= fPagePool.GetPage(columnId, columnElementId.fInMemoryType, globalIndex); if (!cachedPageRef.Get().IsNull()) return cachedPageRef; @@ -355,7 +370,8 @@ ROOT::Experimental::Internal::RPageSource::LoadPage(ColumnHandle_t columnHandle, const auto clusterId = clusterIndex.GetClusterId(); const auto idxInCluster = clusterIndex.GetIndex(); const auto columnId = columnHandle.fPhysicalId; - auto cachedPageRef = fPagePool.GetPage(columnId, clusterIndex); + const auto columnElementId = columnHandle.fColumn->GetElement()->GetIdentifier(); + auto cachedPageRef = fPagePool.GetPage(columnId, columnElementId.fInMemoryType, clusterIndex); if (!cachedPageRef.Get().IsNull()) return cachedPageRef; @@ -611,6 +627,7 @@ bool ROOT::Experimental::Internal::RWritePageMemoryManager::TryUpdate(RColumn &c ROOT::Experimental::Internal::RPageSink::RPageSink(std::string_view name, const RNTupleWriteOptions &options) : RPageStorage(name), fOptions(options.Clone()), fWritePageMemoryManager(options.GetPageBufferBudget()) { + ROOT::Experimental::Internal::EnsureValidNameForRNTuple(name, "RNTuple").ThrowOnError(); } ROOT::Experimental::Internal::RPageSink::~RPageSink() {} diff --git a/tree/ntuple/v7/src/RPageStorageDaos.cxx b/tree/ntuple/v7/src/RPageStorageDaos.cxx index 9afadc7d1b68d..6b75d5867b77f 100644 --- a/tree/ntuple/v7/src/RPageStorageDaos.cxx +++ b/tree/ntuple/v7/src/RPageStorageDaos.cxx @@ -172,13 +172,6 @@ struct RDaosContainerNTupleLocator { throw ROOT::Experimental::RException( R__FAIL("unsupported RNTuple epoch version: " + std::to_string(anchor.fVersionEpoch))); } - if (anchor.fVersionEpoch == 0) { - static std::once_flag once; - std::call_once(once, [&anchor]() { - R__LOG_WARNING(ROOT::Experimental::NTupleLog()) - << "Pre-release format version: RC " << anchor.fVersionMajor; - }); - } builder.SetOnDiskHeaderSize(anchor.fNBytesHeader); buffer = std::make_unique(anchor.fLenHeader); @@ -604,13 +597,14 @@ ROOT::Experimental::Internal::RPageSourceDaos::LoadPageImpl(ColumnHandle_t colum const auto element = columnHandle.fColumn->GetElement(); const auto elementSize = element->GetSize(); + const auto elementInMemoryType = element->GetIdentifier().fInMemoryType; if (pageInfo.fLocator.fType == RNTupleLocator::kTypePageZero) { auto pageZero = RPage::MakePageZero(columnId, elementSize); pageZero.GrowUnchecked(pageInfo.fNElements); pageZero.SetWindow(clusterInfo.fColumnOffset + pageInfo.fFirstInPage, RPage::RClusterInfo(clusterId, clusterInfo.fColumnOffset)); - return fPagePool.RegisterPage(std::move(pageZero)); + return fPagePool.RegisterPage(std::move(pageZero), elementInMemoryType); } RSealedPage sealedPage; @@ -639,7 +633,7 @@ ROOT::Experimental::Internal::RPageSourceDaos::LoadPageImpl(ColumnHandle_t colum fCurrentCluster = fClusterPool->GetCluster(clusterId, fActivePhysicalColumns.ToColumnSet()); R__ASSERT(fCurrentCluster->ContainsColumn(columnId)); - auto cachedPageRef = fPagePool.GetPage(columnId, RClusterIndex(clusterId, idxInCluster)); + auto cachedPageRef = fPagePool.GetPage(columnId, elementInMemoryType, RClusterIndex(clusterId, idxInCluster)); if (!cachedPageRef.Get().IsNull()) return cachedPageRef; @@ -659,7 +653,7 @@ ROOT::Experimental::Internal::RPageSourceDaos::LoadPageImpl(ColumnHandle_t colum newPage.SetWindow(clusterInfo.fColumnOffset + pageInfo.fFirstInPage, RPage::RClusterInfo(clusterId, clusterInfo.fColumnOffset)); fCounters->fNPageUnsealed.Inc(); - return fPagePool.RegisterPage(std::move(newPage)); + return fPagePool.RegisterPage(std::move(newPage), elementInMemoryType); } std::unique_ptr diff 
--git a/tree/ntuple/v7/src/RPageStorageFile.cxx b/tree/ntuple/v7/src/RPageStorageFile.cxx index fcf18ef95bff2..bbd4bc637abee 100644 --- a/tree/ntuple/v7/src/RPageStorageFile.cxx +++ b/tree/ntuple/v7/src/RPageStorageFile.cxx @@ -29,8 +29,8 @@ #include #include +#include #include -#include #include #include @@ -47,11 +47,6 @@ ROOT::Experimental::Internal::RPageSinkFile::RPageSinkFile(std::string_view ntup const RNTupleWriteOptions &options) : RPagePersistentSink(ntupleName, options) { - static std::once_flag once; - std::call_once(once, []() { - R__LOG_WARNING(NTupleLog()) << "The RNTuple file format will change. " - << "Do not store real data with this version of RNTuple!"; - }); fCompressor = std::make_unique(); EnableDefaultMetrics("RPageSinkFile"); fFeatures.fCanMergePages = true; @@ -64,11 +59,11 @@ ROOT::Experimental::Internal::RPageSinkFile::RPageSinkFile(std::string_view ntup fWriter = RNTupleFileWriter::Recreate(ntupleName, path, RNTupleFileWriter::EContainerFormat::kTFile, options); } -ROOT::Experimental::Internal::RPageSinkFile::RPageSinkFile(std::string_view ntupleName, TFile &file, +ROOT::Experimental::Internal::RPageSinkFile::RPageSinkFile(std::string_view ntupleName, TDirectory &fileOrDirectory, const RNTupleWriteOptions &options) : RPageSinkFile(ntupleName, options) { - fWriter = RNTupleFileWriter::Append(ntupleName, file, options.GetMaxKeySize()); + fWriter = RNTupleFileWriter::Append(ntupleName, fileOrDirectory, options.GetMaxKeySize()); } ROOT::Experimental::Internal::RPageSinkFile::~RPageSinkFile() {} @@ -322,12 +317,6 @@ void ROOT::Experimental::Internal::RPageSourceFile::LoadStructureImpl() if (fAnchor->GetVersionEpoch() != RNTuple::kVersionEpoch) { throw RException(R__FAIL("unsupported RNTuple epoch version: " + std::to_string(fAnchor->GetVersionEpoch()))); } - if (fAnchor->GetVersionEpoch() == 0) { - static std::once_flag once; - std::call_once(once, [this]() { - R__LOG_WARNING(NTupleLog()) << "Pre-release format version: RC " << fAnchor->GetVersionMajor(); - }); - } fDescriptorBuilder.SetOnDiskHeaderSize(fAnchor->GetNBytesHeader()); fDescriptorBuilder.AddToOnDiskFooterSize(fAnchor->GetNBytesFooter()); @@ -432,13 +421,14 @@ ROOT::Experimental::Internal::RPageSourceFile::LoadPageImpl(ColumnHandle_t colum const auto element = columnHandle.fColumn->GetElement(); const auto elementSize = element->GetSize(); + const auto elementInMemoryType = element->GetIdentifier().fInMemoryType; if (pageInfo.fLocator.fType == RNTupleLocator::kTypePageZero) { auto pageZero = RPage::MakePageZero(columnId, elementSize); pageZero.GrowUnchecked(pageInfo.fNElements); pageZero.SetWindow(clusterInfo.fColumnOffset + pageInfo.fFirstInPage, RPage::RClusterInfo(clusterId, clusterInfo.fColumnOffset)); - return fPagePool.RegisterPage(std::move(pageZero)); + return fPagePool.RegisterPage(std::move(pageZero), elementInMemoryType); } RSealedPage sealedPage; @@ -463,7 +453,7 @@ ROOT::Experimental::Internal::RPageSourceFile::LoadPageImpl(ColumnHandle_t colum fCurrentCluster = fClusterPool->GetCluster(clusterId, fActivePhysicalColumns.ToColumnSet()); R__ASSERT(fCurrentCluster->ContainsColumn(columnId)); - auto cachedPageRef = fPagePool.GetPage(columnId, RClusterIndex(clusterId, idxInCluster)); + auto cachedPageRef = fPagePool.GetPage(columnId, elementInMemoryType, RClusterIndex(clusterId, idxInCluster)); if (!cachedPageRef.Get().IsNull()) return cachedPageRef; @@ -483,7 +473,7 @@ ROOT::Experimental::Internal::RPageSourceFile::LoadPageImpl(ColumnHandle_t colum newPage.SetWindow(clusterInfo.fColumnOffset + 
pageInfo.fFirstInPage, RPage::RClusterInfo(clusterId, clusterInfo.fColumnOffset)); fCounters->fNPageUnsealed.Inc(); - return fPagePool.RegisterPage(std::move(newPage)); + return fPagePool.RegisterPage(std::move(newPage), elementInMemoryType); } std::unique_ptr diff --git a/tree/ntuple/v7/test/CMakeLists.txt b/tree/ntuple/v7/test/CMakeLists.txt index 1fec73f1a7c4c..8e518b91f4f87 100644 --- a/tree/ntuple/v7/test/CMakeLists.txt +++ b/tree/ntuple/v7/test/CMakeLists.txt @@ -64,7 +64,7 @@ endif() ROOT_ADD_GTEST(ntuple_view ntuple_view.cxx LIBRARIES ROOTNTuple CustomStruct) ROOT_ADD_GTEST(ntuple_zip ntuple_zip.cxx LIBRARIES ROOTNTuple CustomStruct) -ROOT_ADD_GTEST(rfield_check rfield_check.cxx LIBRARIES ROOTNTuple CustomStruct) +ROOT_ADD_GTEST(rfield_basics rfield_basics.cxx LIBRARIES ROOTNTuple CustomStruct) ROOT_ADD_GTEST(rfield_class rfield_class.cxx LIBRARIES ROOTNTuple CustomStruct Physics) ROOT_ADD_GTEST(rfield_string rfield_string.cxx LIBRARIES ROOTNTuple CustomStruct) ROOT_ADD_GTEST(rfield_variant rfield_variant.cxx LIBRARIES ROOTNTuple CustomStruct) diff --git a/tree/ntuple/v7/test/ntuple_basics.cxx b/tree/ntuple/v7/test/ntuple_basics.cxx index 48e30dafe619a..14c8d82823eb0 100644 --- a/tree/ntuple/v7/test/ntuple_basics.cxx +++ b/tree/ntuple/v7/test/ntuple_basics.cxx @@ -198,6 +198,25 @@ TEST(RNTuple, WriteReadInlinedModel) EXPECT_FLOAT_EQ(3.2, (*readvpz)[2]); } +TEST(RNTuple, WriteReadSubdir) +{ + FileRaii fileGuard("test_ntuple_writeread_subdir.root"); + + auto model = RNTupleModel::Create(); + *model->MakeField("pt") = 137.0; + { + auto file = std::unique_ptr(TFile::Open(fileGuard.GetPath().c_str(), "RECREATE")); + auto dir = file->mkdir("foo"); + auto writer = RNTupleWriter::Append(std::move(model), "ntpl", *dir); + writer->Fill(); + } + + auto reader = RNTupleReader::Open("/foo/ntpl", fileGuard.GetPath()); + EXPECT_EQ(1U, reader->GetNEntries()); + reader->LoadEntry(0); + EXPECT_FLOAT_EQ(137.0, *reader->GetModel().GetDefaultEntry().GetPtr("pt")); +} + TEST(RNTuple, FileAnchor) { FileRaii fileGuard("test_ntuple_file_anchor.root"); @@ -765,3 +784,49 @@ TEST(RNTupleWriter, ForbidModelWithSubfields) testing::HasSubstr("cannot create an RNTupleWriter from a model with registered subfields")); } } + +TEST(RNTupleWriter, ForbiddenCharactersInRNTupleName) +{ + FileRaii fileGuard("test_ntuple_writer_forbidden_characters_in_rntuple_name.root"); + + std::array names{"ntu.ple", "nt/uple", "n tuple", "ntupl\\e", "n\ntuple", "ntup\tle"}; + + for (auto &&name : names) { + try { + auto writer = RNTupleWriter::Recreate(RNTupleModel::Create(), name, fileGuard.GetPath()); + FAIL() << "Should not be able to create an RNTuple with name '" << name << "'."; + } catch (const RException &err) { + EXPECT_THAT(err.what(), + testing::HasSubstr("RNTuple name '" + std::string(name) + "' cannot contain character")); + } + } +} + +TEST(RNTuple, ForbiddenCharactersInField) +{ + std::array names{"fie.ld", "fi/eld", "f ield", "fiel\\d", "f\nield", "fi\teld"}; + + for (auto &&name : names) { + try { + auto field = RFieldBase::Create(name, "int").Unwrap(); + FAIL() << "Should not be able to create an RNTuple field with name '" << name << "'."; + } catch (const RException &err) { + EXPECT_THAT(err.what(), testing::HasSubstr("Field name '" + std::string(name) + "' cannot contain character")); + } + } +} + +TEST(RNTuple, ForbiddenCharactersInModelField) +{ + std::array names{"fie.ld", "fi/eld", "f ield", "fiel\\d", "f\nield", "fi\teld"}; + + auto model = RNTupleModel::Create(); + for (auto &&name : names) { + try { + auto 
field = model->MakeField(name); + FAIL() << "Should not be able to create an RNTuple field with name '" << name << "'."; + } catch (const RException &err) { + EXPECT_THAT(err.what(), testing::HasSubstr("Field name '" + std::string(name) + "' cannot contain character")); + } + } +} diff --git a/tree/ntuple/v7/test/ntuple_endian.cxx b/tree/ntuple/v7/test/ntuple_endian.cxx index de6b8637ec077..69c0b0c31547c 100644 --- a/tree/ntuple/v7/test/ntuple_endian.cxx +++ b/tree/ntuple/v7/test/ntuple_endian.cxx @@ -100,7 +100,7 @@ class RPageSourceMock : public RPageSource { RPageRef LoadPage(ColumnHandle_t columnHandle, NTupleSize_t i) final { auto page = RPageSource::UnsealPage(fPages[i], fElement, columnHandle.fPhysicalId).Unwrap(); - return fPagePool.RegisterPage(std::move(page)); + return fPagePool.RegisterPage(std::move(page), std::type_index(typeid(void))); } RPageRef LoadPage(ColumnHandle_t, ROOT::Experimental::RClusterIndex) final { return RPageRef(); } void LoadSealedPage(ROOT::Experimental::DescriptorId_t, ROOT::Experimental::RClusterIndex, RSealedPage &) final {} diff --git a/tree/ntuple/v7/test/ntuple_extended.cxx b/tree/ntuple/v7/test/ntuple_extended.cxx index e751bbf58353a..62bb4264a5256 100644 --- a/tree/ntuple/v7/test/ntuple_extended.cxx +++ b/tree/ntuple/v7/test/ntuple_extended.cxx @@ -5,6 +5,7 @@ #include #include +#include #include TEST(RNTuple, RealWorld1) @@ -75,6 +76,87 @@ TEST(RNTuple, RealWorld1) EXPECT_EQ(chksumRead, chksumWrite); } +TEST(RNTuple, Double32IMT) +{ + // Tests if parallel decompression correctly compresses the on-disk float to an in-memory double +#ifdef R__USE_IMT + IMTRAII _; +#endif + FileRaii fileGuard("test_ntuple_double32_imt.root"); + + constexpr int kNEvents = 10; + + { + auto model = RNTupleModel::Create(); + model->AddField(RFieldBase::Create("pt", "Double32_t").Unwrap()); + auto writer = RNTupleWriter::Recreate(std::move(model), "ntpl", fileGuard.GetPath()); + + auto ptrPt = writer->GetModel().GetDefaultEntry().GetPtr("pt"); + + for (int i = 0; i < kNEvents; ++i) { + *ptrPt = i; + writer->Fill(); + } + } + + auto reader = RNTupleReader::Open("ntpl", fileGuard.GetPath()); + auto viewPt = reader->GetView("pt"); + for (int i = 0; i < kNEvents; ++i) { + EXPECT_DOUBLE_EQ(i, viewPt(i)); + } +} + +TEST(RNTuple, MultiColumnExpansion) +{ + // Tests if on-disk columns that expand to multiple in-memory types are correctly handled +#ifdef R__USE_IMT + IMTRAII _; +#endif + FileRaii fileGuard("test_ntuple_multi_column_expansion.root"); + + constexpr int kNEvents = 1000; + + { + auto model = RNTupleModel::Create(); + model->AddField(RFieldBase::Create("pt", "Double32_t").Unwrap()); + RNTupleWriteOptions options; + options.SetMaxUnzippedPageSize(32); + options.SetInitialNElementsPerPage(1); + auto writer = RNTupleWriter::Recreate(std::move(model), "ntpl", fileGuard.GetPath(), options); + + auto ptrPt = writer->GetModel().GetDefaultEntry().GetPtr("pt"); + + for (int i = 0; i < kNEvents; ++i) { + *ptrPt = i; + writer->Fill(); + if (i % 50 == 0) + writer->CommitCluster(); + } + } + + auto reader = RNTupleReader::Open("ntpl", fileGuard.GetPath()); + auto viewPt = reader->GetView("pt"); + auto viewPtAsFloat = reader->GetView("pt"); + + std::random_device rd; + std::mt19937 gen(rd()); + std::vector indexes; + indexes.reserve(kNEvents); + for (unsigned int i = 0; i < kNEvents; ++i) + indexes.emplace_back(i); + std::shuffle(indexes.begin(), indexes.end(), gen); + + std::bernoulli_distribution dist(0.5); + for (auto idx : indexes) { + if (dist(gen)) { + EXPECT_DOUBLE_EQ(idx, 
viewPt(idx)); + EXPECT_DOUBLE_EQ(idx, viewPtAsFloat(idx)); + } else { + EXPECT_DOUBLE_EQ(idx, viewPtAsFloat(idx)); + EXPECT_DOUBLE_EQ(idx, viewPt(idx)); + } + } +} // Stress test the asynchronous cluster pool by a deliberately unfavourable read pattern TEST(RNTuple, RandomAccess) diff --git a/tree/ntuple/v7/test/ntuple_merger.cxx b/tree/ntuple/v7/test/ntuple_merger.cxx index 50f03eac74aa7..892679f64ee13 100644 --- a/tree/ntuple/v7/test/ntuple_merger.cxx +++ b/tree/ntuple/v7/test/ntuple_merger.cxx @@ -521,10 +521,10 @@ TEST(RNTupleMerger, MergeInconsistentTypes) FileRaii fileGuard1("test_ntuple_merge_in_1.root"); { auto model = RNTupleModel::Create(); - auto fieldFoo = model->MakeField("foo", 0); + auto fieldFoo = model->MakeField("foo", "0"); auto ntuple = RNTupleWriter::Recreate(std::move(model), "ntuple", fileGuard1.GetPath()); for (size_t i = 0; i < 10; ++i) { - *fieldFoo = i * 123; + *fieldFoo = std::to_string(i * 123); ntuple->Fill(); } } @@ -734,8 +734,6 @@ TEST(RNTupleMerger, MergeThroughTFileMergerIncremental) TEST(RNTupleMerger, MergeThroughTFileMergerKey) { ROOT::TestSupport::CheckDiagsRAII diags; - diags.optionalDiag(kWarning, "RPageSinkFile", "The RNTuple file format will change.", false); - diags.optionalDiag(kWarning, "[ROOT.NTuple]", "Pre-release format version: RC 2", false); diags.requiredDiag(kWarning, "TFileMerger", "Merging RNTuples is experimental"); diags.requiredDiag(kError, "RNTuple::Merge", "Output file already has key, but not of type RNTuple!"); diags.requiredDiag(kError, "TFileMerger", "Could NOT merge RNTuples!"); @@ -774,8 +772,6 @@ TEST(RNTupleMerger, MergeThroughTFileMergerKey) TEST(RNTupleMerger, MergeThroughTBufferMerger) { ROOT::TestSupport::CheckDiagsRAII diags; - diags.optionalDiag(kWarning, "RPageSinkFile", "The RNTuple file format will change.", false); - diags.optionalDiag(kWarning, "[ROOT.NTuple]", "Pre-release format version: RC 2", false); diags.requiredDiag(kWarning, "TFileMerger", "Merging RNTuples is experimental"); diags.requiredDiag(kWarning, "TBufferMergerFile", "not attached to the directory", false); @@ -800,49 +796,20 @@ TEST(RNTupleMerger, MergeThroughTBufferMerger) EXPECT_EQ(reader->GetNEntries(), 10); } -static bool VerifyValidZLIB(const void *buf, size_t bufsize, size_t tgtsize) +static bool VerifyPageCompression(const std::string_view fileName, int expectedComp) { - // Mostly copy-pasted code from R__unzipZLIB - auto tgt = std::make_unique(tgtsize); - auto *src = reinterpret_cast(buf); - const auto HDRSIZE = 9; - z_stream stream = {}; - stream.next_in = (Bytef *)(&src[HDRSIZE]); - stream.avail_in = (uInt)bufsize - HDRSIZE; - stream.next_out = tgt.get(); - stream.avail_out = (uInt)tgtsize; - - auto is_valid_header_zlib = [](const uint8_t *s) { return s[0] == 'Z' && s[1] == 'L' && s[2] == Z_DEFLATED; }; - if (!is_valid_header_zlib(src)) - return false; - - int err = inflateInit(&stream); - if (err != Z_OK) - return false; - - while ((err = inflate(&stream, Z_FINISH)) != Z_STREAM_END) { - EXPECT_EQ(err, Z_OK); - if (err != Z_OK) - return false; + // Check that the advertised compression is correct + bool ok = true; + { + auto reader = RNTupleReader::Open("ntuple", fileName); + auto compSettings = reader->GetDescriptor().GetClusterDescriptor(0).GetColumnRange(0).fCompressionSettings; + if (compSettings != expectedComp) { + std::cerr << "Advertised compression is wrong: " << compSettings << " instead of " << expectedComp << "\n"; + ok = false; + } } - inflateEnd(&stream); - - return true; -} - -enum class PageCompCheckType { 
kUncompressed, kZlib }; - -static bool VerifyPageCompression(const std::string_view fileName, PageCompCheckType checkType) -{ - // TODO(gparolini): eventually we want to do the following check: - // auto reader = RNTupleReader::Open("ntuple", fileGuardOut.GetPath()); - // auto compSettings = reader->GetDescriptor().GetClusterDescriptor(0).GetColumnRange(0).fCompressionSettings; - // EXPECT_EQ(compSettings, kNewComp); - // but right now we don't write the correct metadata when calling Merge() so we can't trust the advertised - // compression settings to reflect the actual algorithm being used for compression. - // Therefore, for now we do a more expensive check where we try to unzip the data using the expected - // algorithm and verify that it works. + // Check that the actual compression is correct auto source = RPageSource::Create("ntuple", fileName); source->Attach(); auto descriptor = source->GetSharedDescriptorGuard(); @@ -854,11 +821,15 @@ static bool VerifyPageCompression(const std::string_view fileName, PageCompCheck sealedPage.SetBuffer(buffer.get()); source->LoadSealedPage(0, {0, 0}, sealedPage); - size_t uncompSize = sealedPage.GetNElements() * colElement->GetSize(); - if (checkType == PageCompCheckType::kZlib) - return VerifyValidZLIB(sealedPage.GetBuffer(), sealedPage.GetDataSize(), uncompSize); - else - return sealedPage.GetDataSize() == uncompSize; + // size_t uncompSize = sealedPage.GetNElements() * colElement->GetSize(); + int compAlgo = R__getCompressionAlgorithm((const unsigned char *)sealedPage.GetBuffer(), sealedPage.GetDataSize()); + if (compAlgo == ROOT::RCompressionSetting::EAlgorithm::kUndefined) + compAlgo = 0; + if (compAlgo != (expectedComp / 100)) { + std::cerr << "Actual compression is wrong: " << compAlgo << " instead of " << (expectedComp / 100) << "\n"; + ok = false; + } + return ok; } TEST(RNTupleMerger, ChangeCompression) @@ -874,9 +845,10 @@ TEST(RNTupleMerger, ChangeCompression) } } - constexpr auto kNewComp = 101; + constexpr auto kNewComp = 404; FileRaii fileGuardOutChecksum("test_ntuple_merge_changecomp_out.root"); FileRaii fileGuardOutNoChecksum("test_ntuple_merge_changecomp_out_nock.root"); + FileRaii fileGuardOutDiffComp("test_ntuple_merge_changecomp_out_diff.root"); FileRaii fileGuardOutUncomp("test_ntuple_merge_changecomp_out_uncomp.root"); { // Gather the input sources @@ -890,25 +862,98 @@ TEST(RNTupleMerger, ChangeCompression) // Create the output auto writeOpts = RNTupleWriteOptions{}; writeOpts.SetEnablePageChecksums(true); + auto destinationDifferentComp = + std::make_unique("ntuple", fileGuardOutDiffComp.GetPath(), writeOpts); + writeOpts.SetCompression(kNewComp); auto destinationChecksum = std::make_unique("ntuple", fileGuardOutChecksum.GetPath(), writeOpts); + auto destinationNoChecksum = + std::make_unique("ntuple", fileGuardOutNoChecksum.GetPath(), writeOpts); + writeOpts.SetCompression(0); auto destinationUncomp = std::make_unique("ntuple", fileGuardOutUncomp.GetPath(), writeOpts); writeOpts.SetEnablePageChecksums(false); + + RNTupleMerger merger; + auto opts = RNTupleMergeOptions{}; + opts.fCompressionSettings = kNewComp; + // This should fail because we specified a different compression than the sink + auto res = merger.Merge(sourcePtrs, *destinationDifferentComp, opts); + EXPECT_FALSE(bool(res)); + res = merger.Merge(sourcePtrs, *destinationChecksum, opts); + EXPECT_TRUE(bool(res)); + res = merger.Merge(sourcePtrs, *destinationNoChecksum, opts); + EXPECT_TRUE(bool(res)); + opts.fCompressionSettings = 0; + res = 
merger.Merge(sourcePtrs, *destinationUncomp, opts); + EXPECT_TRUE(bool(res)); + } + + // Check that compression is the right one + EXPECT_TRUE(VerifyPageCompression(fileGuardOutChecksum.GetPath(), kNewComp)); + EXPECT_TRUE(VerifyPageCompression(fileGuardOutNoChecksum.GetPath(), kNewComp)); + EXPECT_TRUE(VerifyPageCompression(fileGuardOutUncomp.GetPath(), 0)); +} + +TEST(RNTupleMerger, ChangeCompressionMixed) +{ + FileRaii fileGuard("test_ntuple_merge_changecomp_mixed_in.root"); + { + auto model = RNTupleModel::Create(); + auto fieldFoo = model->MakeField("foo"); + auto ntuple = RNTupleWriter::Recreate(std::move(model), "ntuple", fileGuard.GetPath()); + // Craft the input so that we have one column that ends up compressed (the indices) and one that is not (the + // chars) + for (size_t i = 0; i < 10; ++i) { + *fieldFoo = (char)(i + 'A'); + ntuple->Fill(); + } + } + + FileRaii fileGuardOutChecksum("test_ntuple_merge_changecomp_mixed_out.root"); + FileRaii fileGuardOutDiffComp("test_ntuple_merge_changecomp_mixed_out_diff.root"); + FileRaii fileGuardOutNoChecksum("test_ntuple_merge_changecomp_mixed_out_nock.root"); + FileRaii fileGuardOutUncomp("test_ntuple_merge_changecomp_mixed_out_uncomp.root"); + { + // Gather the input sources + std::vector> sources; + sources.push_back(RPageSource::Create("ntuple", fileGuard.GetPath(), RNTupleReadOptions())); + sources.push_back(RPageSource::Create("ntuple", fileGuard.GetPath(), RNTupleReadOptions())); + std::vector sourcePtrs; + for (const auto &s : sources) { + sourcePtrs.push_back(s.get()); + } + + // Create the output + auto writeOpts = RNTupleWriteOptions{}; + writeOpts.SetEnablePageChecksums(true); + auto destinationChecksum = std::make_unique("ntuple", fileGuardOutChecksum.GetPath(), writeOpts); auto destinationNoChecksum = std::make_unique("ntuple", fileGuardOutNoChecksum.GetPath(), writeOpts); + writeOpts.SetCompression(101); + auto destinationDifferentComp = + std::make_unique("ntuple", fileGuardOutDiffComp.GetPath(), writeOpts); + writeOpts.SetCompression(0); + auto destinationUncomp = std::make_unique("ntuple", fileGuardOutUncomp.GetPath(), writeOpts); + writeOpts.SetEnablePageChecksums(false); RNTupleMerger merger; auto opts = RNTupleMergeOptions{}; - opts.fCompressionSettings = kNewComp; - merger.Merge(sourcePtrs, *destinationChecksum, opts); - merger.Merge(sourcePtrs, *destinationNoChecksum, opts); + auto res = merger.Merge(sourcePtrs, *destinationChecksum, opts); + EXPECT_TRUE(bool(res)); + res = merger.Merge(sourcePtrs, *destinationNoChecksum, opts); + EXPECT_TRUE(bool(res)); + opts.fCompressionSettings = 101; + res = merger.Merge(sourcePtrs, *destinationDifferentComp, opts); + EXPECT_TRUE(bool(res)); opts.fCompressionSettings = 0; - merger.Merge(sourcePtrs, *destinationUncomp, opts); + res = merger.Merge(sourcePtrs, *destinationUncomp, opts); + EXPECT_TRUE(bool(res)); } // Check that compression is the right one - EXPECT_TRUE(VerifyPageCompression(fileGuardOutChecksum.GetPath(), PageCompCheckType::kZlib)); - EXPECT_TRUE(VerifyPageCompression(fileGuardOutNoChecksum.GetPath(), PageCompCheckType::kZlib)); - EXPECT_TRUE(VerifyPageCompression(fileGuardOutUncomp.GetPath(), PageCompCheckType::kUncompressed)); + EXPECT_TRUE(VerifyPageCompression(fileGuardOutChecksum.GetPath(), 505)); + EXPECT_TRUE(VerifyPageCompression(fileGuardOutNoChecksum.GetPath(), 505)); + EXPECT_TRUE(VerifyPageCompression(fileGuardOutDiffComp.GetPath(), 101)); + EXPECT_TRUE(VerifyPageCompression(fileGuardOutUncomp.GetPath(), 0)); } TEST(RNTupleMerger, 
MergeLateModelExtension) @@ -1031,7 +1076,7 @@ TEST(RNTupleMerger, MergeCompression) } // Now merge the inputs - const auto kOutCompSettings = 101; + const auto kOutCompSettings = ROOT::RCompressionSetting::EDefaults::kUseCompiledDefault; FileRaii fileGuard3("test_ntuple_merge_comp_out.root"); { // Gather the input sources @@ -1047,7 +1092,9 @@ TEST(RNTupleMerger, MergeCompression) RNTupleMerger merger; RNTupleMergeOptions opts; { - auto destination = std::make_unique("ntuple", fileGuard3.GetPath(), RNTupleWriteOptions()); + auto wopts = RNTupleWriteOptions(); + wopts.SetCompression(kOutCompSettings); + auto destination = std::make_unique("ntuple", fileGuard3.GetPath(), wopts); opts.fMergingMode = ENTupleMergingMode::kUnion; opts.fCompressionSettings = kOutCompSettings; auto res = merger.Merge(sourcePtrs, *destination, opts); @@ -1055,7 +1102,7 @@ TEST(RNTupleMerger, MergeCompression) } } - EXPECT_TRUE(VerifyPageCompression(fileGuard3.GetPath(), PageCompCheckType::kZlib)); + EXPECT_TRUE(VerifyPageCompression(fileGuard3.GetPath(), kOutCompSettings)); { FileRaii fileGuard4("test_ntuple_merge_comp_out_tfilemerger.root"); @@ -1067,6 +1114,366 @@ TEST(RNTupleMerger, MergeCompression) fileMerger.AddFile(nt2.get()); fileMerger.Merge(); - EXPECT_TRUE(VerifyPageCompression(fileGuard4.GetPath(), PageCompCheckType::kZlib)); + EXPECT_TRUE(VerifyPageCompression(fileGuard4.GetPath(), kOutCompSettings)); + } +} + +TEST(RNTupleMerger, DifferentCompatibleRepresentations) +{ + // Verify that we can merge two RNTuples with fields that have different, but compatible, column representations. + FileRaii fileGuard1("test_ntuple_merge_diff_rep_in_1.root"); + + auto model = RNTupleModel::Create(); + auto pFoo = model->MakeField("foo", 0); + auto clonedModel = model->Clone(); + { + auto ntuple = RNTupleWriter::Recreate(std::move(model), "ntuple", fileGuard1.GetPath()); + for (size_t i = 0; i < 10; ++i) { + *pFoo = i * 123; + ntuple->Fill(); + } + } + + FileRaii fileGuard2("test_ntuple_merge_diff_rep_in_2.root"); + + { + auto &fieldFooDbl = clonedModel->GetMutableField("foo"); + fieldFooDbl.SetColumnRepresentatives({{EColumnType::kReal32}}); + auto ntuple = RNTupleWriter::Recreate(std::move(clonedModel), "ntuple", fileGuard2.GetPath()); + auto e = ntuple->CreateEntry(); + auto pFoo2 = e->GetPtr("foo"); + for (size_t i = 0; i < 10; ++i) { + *pFoo2 = i * 567; + ntuple->Fill(); + } + } + + // Now merge the inputs + FileRaii fileGuard3("test_ntuple_merge_diff_rep_out1.root"); + FileRaii fileGuard4("test_ntuple_merge_diff_rep_out2.root"); + { + // Gather the input sources + std::vector> sources; + sources.push_back(RPageSource::Create("ntuple", fileGuard1.GetPath())); + sources.push_back(RPageSource::Create("ntuple", fileGuard2.GetPath())); + std::vector sourcePtrs; + for (const auto &s : sources) { + sourcePtrs.push_back(s.get()); + } + + auto sourcePtrs2 = sourcePtrs; + + // Now Merge the inputs. 
Do both with and without compression change + RNTupleMerger merger; + { + auto wopts = RNTupleWriteOptions(); + wopts.SetCompression(0); + auto destination = std::make_unique("ntuple", fileGuard3.GetPath(), wopts); + auto opts = RNTupleMergeOptions(); + opts.fCompressionSettings = 0; + auto res = merger.Merge(sourcePtrs, *destination, opts); + // TODO(gparolini): we want to support this in the future + EXPECT_FALSE(bool(res)); + if (res.GetError()) { + EXPECT_THAT(res.GetError()->GetReport(), testing::HasSubstr("different column type")); + } + // EXPECT_TRUE(bool(res)); + } + { + auto destination = std::make_unique("ntuple", fileGuard4.GetPath(), RNTupleWriteOptions()); + auto res = merger.Merge(sourcePtrs, *destination); + // TODO(gparolini): we want to support this in the future + EXPECT_FALSE(bool(res)); + if (res.GetError()) { + EXPECT_THAT(res.GetError()->GetReport(), testing::HasSubstr("different column type")); + } + // EXPECT_TRUE(bool(res)); + } + } +} + +TEST(RNTupleMerger, MultipleRepresentations) +{ + // verify that we properly handle ntuples with multiple column representations + FileRaii fileGuard1("test_ntuple_merge_multirep_in_1.root"); + + { + auto model = RNTupleModel::Create(); + auto fldPx = RFieldBase::Create("px", "float").Unwrap(); + fldPx->SetColumnRepresentatives({{EColumnType::kReal32}, {EColumnType::kReal16}}); + model->AddField(std::move(fldPx)); + auto ptrPx = model->GetDefaultEntry().GetPtr("px"); + auto writer = RNTupleWriter::Recreate(std::move(model), "ntuple", fileGuard1.GetPath()); + *ptrPx = 1.0; + writer->Fill(); + writer->CommitCluster(); + ROOT::Experimental::Internal::RFieldRepresentationModifier::SetPrimaryColumnRepresentation( + const_cast(writer->GetModel().GetConstField("px")), 1); + *ptrPx = 2.0; + writer->Fill(); + } + + // Now merge the inputs + FileRaii fileGuard2("test_ntuple_merge_multirep_out.root"); + { + // Gather the input sources + std::vector> sources; + sources.push_back(RPageSource::Create("ntuple", fileGuard1.GetPath())); + sources.push_back(RPageSource::Create("ntuple", fileGuard1.GetPath())); + std::vector sourcePtrs; + for (const auto &s : sources) { + sourcePtrs.push_back(s.get()); + } + + auto sourcePtrs2 = sourcePtrs; + + RNTupleMerger merger; + { + auto destination = std::make_unique("ntuple", fileGuard2.GetPath(), RNTupleWriteOptions()); + auto opts = RNTupleMergeOptions(); + opts.fCompressionSettings = 0; + auto res = merger.Merge(sourcePtrs, *destination, opts); + // TODO(gparolini): we want to support this in the future + // XXX: this currently fails because of a mismatch in the number of columns of dst vs src. + // Is this correct? Anyway the situation will likely change once we properly support different representation + // indices... + EXPECT_FALSE(bool(res)); + // EXPECT_TRUE(bool(res)); + } + } +} + +TEST(RNTupleMerger, Double32) +{ + // Verify that we can merge two RNTuples with fields that have different, but compatible, column representations. 
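The merger tests in this file all follow the same pattern: open every input as an RPageSource, create an RPageSinkFile for the output, and hand both to RNTupleMerger::Merge together with an RNTupleMergeOptions. With this patch, fCompressionSettings has to agree with the compression configured on the destination sink, otherwise Merge returns an error (see ChangeCompression above). A condensed sketch of that pattern, based on the calls used in the tests; the include paths and namespace qualifications are assumptions (the tests pull these names in via ntuple_test.hxx):

// Sketch only: merge several RNTuples into one output while setting the compression.
#include <ROOT/RNTupleMerger.hxx>
#include <ROOT/RNTupleWriteOptions.hxx>
#include <ROOT/RPageStorageFile.hxx>

#include <memory>
#include <string>
#include <vector>

using ROOT::Experimental::RNTupleWriteOptions;
using ROOT::Experimental::Internal::RNTupleMergeOptions;
using ROOT::Experimental::Internal::RNTupleMerger;
using ROOT::Experimental::Internal::RPageSinkFile;
using ROOT::Experimental::Internal::RPageSource;

bool MergeWithCompression(const std::vector<std::string> &inputs, const std::string &output, int compression)
{
   std::vector<std::unique_ptr<RPageSource>> sources;
   std::vector<RPageSource *> sourcePtrs;
   for (const auto &path : inputs) {
      sources.push_back(RPageSource::Create("ntuple", path));
      sourcePtrs.push_back(sources.back().get());
   }

   // The sink's compression and RNTupleMergeOptions::fCompressionSettings must now agree.
   RNTupleWriteOptions writeOpts;
   writeOpts.SetCompression(compression);
   auto destination = std::make_unique<RPageSinkFile>("ntuple", output, writeOpts);

   RNTupleMergeOptions mergeOpts;
   mergeOpts.fCompressionSettings = compression;
   RNTupleMerger merger;
   auto res = merger.Merge(sourcePtrs, *destination, mergeOpts);
   return bool(res); // false on error, e.g. incompatible schemas or a compression mismatch
}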
+ FileRaii fileGuard1("test_ntuple_merge_d32_in_1.root"); + + { + auto model = RNTupleModel::Create(); + auto pFoo = model->MakeField("foo", 0); + auto ntuple = RNTupleWriter::Recreate(std::move(model), "ntuple", fileGuard1.GetPath()); + for (size_t i = 0; i < 10; ++i) { + *pFoo = i * 123; + ntuple->Fill(); + } + } + + FileRaii fileGuard2("test_ntuple_merge_d32_in_2.root"); + + { + auto model = RNTupleModel::Create(); + auto pFoo = model->MakeField("foo", 0); + auto ntuple = RNTupleWriter::Recreate(std::move(model), "ntuple", fileGuard2.GetPath()); + for (size_t i = 0; i < 10; ++i) { + *pFoo = i * 321; + ntuple->Fill(); + } + } + + // Now merge the inputs + FileRaii fileGuard3("test_ntuple_merge_d32_out1.root"); + FileRaii fileGuard4("test_ntuple_merge_d32_out2.root"); + { + // Gather the input sources + std::vector> sources; + sources.push_back(RPageSource::Create("ntuple", fileGuard1.GetPath())); + sources.push_back(RPageSource::Create("ntuple", fileGuard2.GetPath())); + std::vector sourcePtrs; + for (const auto &s : sources) { + sourcePtrs.push_back(s.get()); + } + + auto sourcePtrs2 = sourcePtrs; + + // Now Merge the inputs. Do both with and without compression change + RNTupleMerger merger; + { + auto wopts = RNTupleWriteOptions(); + wopts.SetCompression(0); + auto destination = std::make_unique("ntuple", fileGuard3.GetPath(), wopts); + auto opts = RNTupleMergeOptions(); + opts.fCompressionSettings = 0; + auto res = merger.Merge(sourcePtrs, *destination, opts); + EXPECT_TRUE(bool(res)); + } + { + auto ntuple = RNTupleReader::Open("ntuple", fileGuard3.GetPath()); + auto foo = ntuple->GetModel().GetDefaultEntry().GetPtr("foo"); + + for (int i = 0; i < 10; ++i) { + ntuple->LoadEntry(i); + ASSERT_DOUBLE_EQ(*foo, i * 123); + } + for (int i = 10; i < 20; ++i) { + ntuple->LoadEntry(i); + ASSERT_DOUBLE_EQ(*foo, (i - 10) * 321); + } + } + { + auto destination = std::make_unique("ntuple", fileGuard4.GetPath(), RNTupleWriteOptions()); + auto res = merger.Merge(sourcePtrs, *destination); + EXPECT_TRUE(bool(res)); + } + { + auto ntuple = RNTupleReader::Open("ntuple", fileGuard4.GetPath()); + auto foo = ntuple->GetModel().GetDefaultEntry().GetPtr("foo"); + + for (int i = 0; i < 10; ++i) { + ntuple->LoadEntry(i); + ASSERT_DOUBLE_EQ(*foo, i * 123); + } + for (int i = 10; i < 20; ++i) { + ntuple->LoadEntry(i); + ASSERT_DOUBLE_EQ(*foo, (i - 10) * 321); + } + } + } +} + +TEST(RNTupleMerger, MergeProjectedFields) +{ + // Verify that the projected fields get treated properly by the merge (i.e. 
we don't try and merge the alias columns + // but we preserve the projections) + FileRaii fileGuard1("test_ntuple_merge_proj_in_1.root"); + { + auto model = RNTupleModel::Create(); + auto fieldFoo = model->MakeField("foo", 0); + auto projBar = RFieldBase::Create("bar", "int").Unwrap(); + model->AddProjectedField(std::move(projBar), [](const std::string &) { return "foo"; }); + auto ntuple = RNTupleWriter::Recreate(std::move(model), "ntuple", fileGuard1.GetPath()); + for (size_t i = 0; i < 10; ++i) { + *fieldFoo = i * 123; + ntuple->Fill(); + } + } + + FileRaii fileGuard2("test_ntuple_merge_proj_out.root"); + { + // Gather the input sources + std::vector> sources; + sources.push_back(RPageSource::Create("ntuple", fileGuard1.GetPath(), RNTupleReadOptions())); + sources.push_back(RPageSource::Create("ntuple", fileGuard1.GetPath(), RNTupleReadOptions())); + std::vector sourcePtrs; + for (const auto &s : sources) { + sourcePtrs.push_back(s.get()); + } + + // Now Merge the inputs + auto destination = std::make_unique("ntuple", fileGuard2.GetPath(), RNTupleWriteOptions()); + RNTupleMerger merger; + auto res = merger.Merge(sourcePtrs, *destination); + EXPECT_TRUE(bool(res)); + } + + { + auto ntuple1 = RNTupleReader::Open("ntuple", fileGuard1.GetPath()); + auto ntuple2 = RNTupleReader::Open("ntuple", fileGuard2.GetPath()); + ASSERT_EQ(ntuple1->GetNEntries() + ntuple1->GetNEntries(), ntuple2->GetNEntries()); + + auto foo1 = ntuple1->GetModel().GetDefaultEntry().GetPtr("foo"); + auto foo2 = ntuple2->GetModel().GetDefaultEntry().GetPtr("foo"); + + auto bar1 = ntuple1->GetModel().GetDefaultEntry().GetPtr("bar"); + auto bar2 = ntuple2->GetModel().GetDefaultEntry().GetPtr("bar"); + + for (auto i = 0u; i < ntuple2->GetNEntries(); ++i) { + ntuple1->LoadEntry(i % ntuple1->GetNEntries()); + ntuple2->LoadEntry(i); + ASSERT_EQ(*foo1, *foo2); + ASSERT_EQ(*bar1, *bar2); + } + } +} + +struct RNTupleMergerCheckEncoding : public ::testing::TestWithParam> {}; + +TEST_P(RNTupleMergerCheckEncoding, CorrectEncoding) +{ + const auto [compInput0, compInput1, compOutput, useDefaultComp] = GetParam(); + int expectedComp = useDefaultComp ? 505 : compOutput; + + // Verify that if the encoding of the inputs' fields is properly converted to match the output file's compression + // (e.g. if we merge a compressed RNTuple with SplitInts and output to an uncompressed one, these should map to + // Ints). 
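The expected values used throughout these checks (0, 101, 207, 404, 505) follow ROOT's packing of compression settings as algorithm * 100 + level, which is also why VerifyPageCompression above compares the detected algorithm against expectedComp / 100. A small standalone helper that makes this arithmetic explicit (illustrative only, not part of the test code):

#include <cstdio>

// ROOT compression settings are encoded as algorithm * 100 + level.
struct CompressionParts {
   int algorithm; // e.g. 1 = zlib, 2 = LZMA, 4 = LZ4, 5 = ZSTD (0 = uncompressed)
   int level;     // compression level, 0-9
};

constexpr CompressionParts DecodeCompression(int setting)
{
   return {setting / 100, setting % 100};
}

int main()
{
   constexpr auto zstd5 = DecodeCompression(505);
   constexpr auto zlib1 = DecodeCompression(101);
   static_assert(zstd5.algorithm == 5 && zstd5.level == 5, "505 = ZSTD, level 5");
   static_assert(zlib1.algorithm == 1 && zlib1.level == 1, "101 = zlib, level 1");
   std::printf("505 -> algorithm %d, level %d\n", zstd5.algorithm, zstd5.level);
   return 0;
}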
+ FileRaii fileGuard1("test_ntuple_merger_enc_in_1.root"); + { + auto model = RNTupleModel::Create(); + auto fieldInt = model->MakeField("int"); + auto fieldFloat = model->MakeField("float"); + auto fieldVec = model->MakeField>("vec"); + auto writeOpts = RNTupleWriteOptions(); + writeOpts.SetCompression(compInput0); + auto ntuple = RNTupleWriter::Recreate(std::move(model), "ntuple", fileGuard1.GetPath(), writeOpts); + for (size_t i = 0; i < 100; ++i) { + *fieldInt = i * 123; + *fieldFloat = i * 123; + *fieldVec = std::vector{i, 2 * i, 3 * i}; + ntuple->Fill(); + } + } + + FileRaii fileGuard2("test_ntuple_merger_enc_in_2.root"); + { + auto model = RNTupleModel::Create(); + auto fieldFloat = model->MakeField("float"); + auto fieldVec = model->MakeField>("vec"); + auto fieldInt = model->MakeField("int"); + auto writeOpts = RNTupleWriteOptions(); + writeOpts.SetCompression(compInput1); + auto ntuple = RNTupleWriter::Recreate(std::move(model), "ntuple", fileGuard2.GetPath(), writeOpts); + for (size_t i = 0; i < 100; ++i) { + *fieldInt = i * 567; + *fieldFloat = i * 567; + *fieldVec = std::vector{4 * i, 5 * i, 6 * i}; + ntuple->Fill(); + } + } + + FileRaii fileGuard3("test_ntuple_merger_enc_out_3.root"); + { + auto nt1 = std::unique_ptr(TFile::Open(fileGuard1.GetPath().c_str())); + auto nt2 = std::unique_ptr(TFile::Open(fileGuard2.GetPath().c_str())); + TFileMerger fileMerger(kFALSE, kFALSE); + fileMerger.OutputFile(fileGuard3.GetPath().c_str(), "RECREATE", compOutput); + fileMerger.AddFile(nt1.get()); + fileMerger.AddFile(nt2.get()); + // If `useDefaultComp` is true, it's as if we were calling hadd without a -f* flag + if (useDefaultComp) + fileMerger.SetMergeOptions(TString("default_compression")); + fileMerger.Merge(); + + EXPECT_TRUE(VerifyPageCompression(fileGuard3.GetPath(), expectedComp)); + } + + { + auto reader = RNTupleReader::Open("ntuple", fileGuard3.GetPath()); + auto pInt = reader->GetView("int"); + auto pFloat = reader->GetView("float"); + auto pVec = reader->GetView>("vec"); + + for (size_t i = 0; i < 100; ++i) { + EXPECT_EQ(pInt(i), i * 123); + EXPECT_FLOAT_EQ(pFloat(i), i * 123); + std::vector v{i, 2 * i, 3 * i}; + EXPECT_EQ(pVec(i), v); + } + for (size_t j = 100; j < 200; ++j) { + size_t i = j - 100; + EXPECT_EQ(pInt(j), i * 567); + EXPECT_FLOAT_EQ(pFloat(j), i * 567); + std::vector v{4 * i, 5 * i, 6 * i}; + EXPECT_EQ(pVec(j), v); + } } } + +INSTANTIATE_TEST_SUITE_P(Seq, RNTupleMergerCheckEncoding, + ::testing::Combine( + // compression of source 1 + ::testing::Values(0, 101, 207, 404, 505), + // compression of source 2 + ::testing::Values(0, 101, 207, 404, 505), + // compression of output TFile + ::testing::Values(0, 101, 207, 404, 505), + // use default compression + ::testing::Values(true, false))); diff --git a/tree/ntuple/v7/test/ntuple_minifile.cxx b/tree/ntuple/v7/test/ntuple_minifile.cxx index dc55ef3e5b7d2..53114d4321f6c 100644 --- a/tree/ntuple/v7/test/ntuple_minifile.cxx +++ b/tree/ntuple/v7/test/ntuple_minifile.cxx @@ -121,6 +121,60 @@ TEST(MiniFile, Proper) EXPECT_EQ(footer, buf); } +TEST(MiniFile, Directory) +{ + FileRaii fileGuard("test_ntuple_minifile_directory.root"); + + std::unique_ptr file(TFile::Open(fileGuard.GetPath().c_str(), "RECREATE")); + auto directory = file->mkdir("foo"); + + auto writer = RNTupleFileWriter::Append("MyNTuple", *directory, RNTupleWriteOptions::kDefaultMaxKeySize); + + char header = 'h'; + char footer = 'f'; + char blob = 'b'; + auto offHeader = writer->WriteNTupleHeader(&header, 1, 1); + auto offBlob = writer->WriteBlob(&blob, 1, 
1); + auto offFooter = writer->WriteNTupleFooter(&footer, 1, 1); + writer->Commit(); + + auto rawFile = RRawFile::Create(fileGuard.GetPath()); + RMiniFileReader reader(rawFile.get()); + EXPECT_FALSE(reader.GetNTuple("MyNTuple")); + EXPECT_FALSE(reader.GetNTuple("bar/MyNTuple")); + EXPECT_FALSE(reader.GetNTuple("foo/bar/MyNTuple")); + auto ntuple = reader.GetNTuple("foo/MyNTuple").Unwrap(); + EXPECT_EQ(offHeader, ntuple.GetSeekHeader()); + EXPECT_EQ(offFooter, ntuple.GetSeekFooter()); + + char buf; + reader.ReadBuffer(&buf, 1, offBlob); + EXPECT_EQ(blob, buf); + reader.ReadBuffer(&buf, 1, offHeader); + EXPECT_EQ(header, buf); + reader.ReadBuffer(&buf, 1, offFooter); + EXPECT_EQ(footer, buf); + + file = std::unique_ptr(TFile::Open(fileGuard.GetPath().c_str(), "UPDATE")); + file->mkdir("foo/bar"); + directory = file->GetDirectory("foo/bar"); + writer = RNTupleFileWriter::Append("MyNTuple2", *directory, RNTupleWriteOptions::kDefaultMaxKeySize); + offHeader = writer->WriteNTupleHeader(&header, 1, 1); + offFooter = writer->WriteNTupleFooter(&footer, 1, 1); + writer->Commit(); + + rawFile = RRawFile::Create(fileGuard.GetPath()); + RMiniFileReader reader2(rawFile.get()); + EXPECT_FALSE(reader2.GetNTuple("foo/bar")); + ntuple = reader2.GetNTuple("foo/bar/MyNTuple2").Unwrap(); + EXPECT_EQ(offHeader, ntuple.GetSeekHeader()); + EXPECT_EQ(offFooter, ntuple.GetSeekFooter()); + + ntuple = reader2.GetNTuple("/foo/bar/MyNTuple2").Unwrap(); + EXPECT_EQ(offHeader, ntuple.GetSeekHeader()); + EXPECT_EQ(offFooter, ntuple.GetSeekFooter()); +} + TEST(MiniFile, SimpleKeys) { FileRaii fileGuard("test_ntuple_minifile_simple_keys.root"); diff --git a/tree/ntuple/v7/test/ntuple_model.cxx b/tree/ntuple/v7/test/ntuple_model.cxx index be0a3dd37843a..f61f5e3dc06b3 100644 --- a/tree/ntuple/v7/test/ntuple_model.cxx +++ b/tree/ntuple/v7/test/ntuple_model.cxx @@ -15,7 +15,7 @@ TEST(RNTupleModel, EnforceValidFieldNames) auto field3 = model->MakeField("pt.pt", 42.0); FAIL() << "field name with periods should throw"; } catch (const RException &err) { - EXPECT_THAT(err.what(), testing::HasSubstr("name 'pt.pt' cannot contain dot characters '.'")); + EXPECT_THAT(err.what(), testing::HasSubstr("name 'pt.pt' cannot contain character '.'")); } // Previous failures to create 'pt' should not block the name diff --git a/tree/ntuple/v7/test/ntuple_pages.cxx b/tree/ntuple/v7/test/ntuple_pages.cxx index 37e8bf5841bfa..3dd5211ada41d 100644 --- a/tree/ntuple/v7/test/ntuple_pages.cxx +++ b/tree/ntuple/v7/test/ntuple_pages.cxx @@ -17,7 +17,7 @@ TEST(Pages, Pool) RPagePool pool; { - auto pageRef = pool.GetPage(0, 0); + auto pageRef = pool.GetPage(0, std::type_index(typeid(void)), 0); EXPECT_TRUE(pageRef.Get().IsNull()); } // returning empty page should not crash @@ -29,26 +29,30 @@ TEST(Pages, Pool) EXPECT_FALSE(page.IsNull()); { - auto registeredPage = pool.RegisterPage(std::move(page)); + auto registeredPage = pool.RegisterPage(std::move(page), std::type_index(typeid(void))); { - auto pageRef = pool.GetPage(0, 0); + auto pageRef = pool.GetPage(0, std::type_index(typeid(void)), 0); EXPECT_TRUE(pageRef.Get().IsNull()); - pageRef = pool.GetPage(0, 55); + pageRef = pool.GetPage(0, std::type_index(typeid(void)), 55); EXPECT_TRUE(pageRef.Get().IsNull()); - pageRef = pool.GetPage(1, 55); + pageRef = pool.GetPage(1, std::type_index(typeid(int)), 55); + EXPECT_TRUE(pageRef.Get().IsNull()); + pageRef = pool.GetPage(1, std::type_index(typeid(void)), 55); EXPECT_FALSE(pageRef.Get().IsNull()); EXPECT_EQ(50U, pageRef.Get().GetGlobalRangeFirst()); 
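Both MiniFile::Directory above and the WriteReadSubdir test earlier in this patch cover appending an RNTuple below a TDirectory instead of directly at the top level of a TFile, and locating it again through its directory path. At the user level the round trip looks roughly like this (a sketch that follows the test code; the include paths are assumptions):

#include <ROOT/RNTupleModel.hxx>
#include <ROOT/RNTupleReader.hxx>
#include <ROOT/RNTupleWriter.hxx>
#include <TFile.h>

#include <memory>

using ROOT::Experimental::RNTupleModel;
using ROOT::Experimental::RNTupleReader;
using ROOT::Experimental::RNTupleWriter;

void WriteIntoSubdirectory(const char *path)
{
   auto model = RNTupleModel::Create();
   auto ptrPt = model->MakeField<float>("pt");

   {
      std::unique_ptr<TFile> file(TFile::Open(path, "RECREATE"));
      auto *dir = file->mkdir("foo"); // the RNTuple key ends up below this directory
      auto writer = RNTupleWriter::Append(std::move(model), "ntpl", *dir);
      *ptrPt = 137.0;
      writer->Fill();
   } // writer and file go out of scope: pages and anchor are committed

   // The ntuple is addressed by its in-file path, as in the WriteReadSubdir test:
   auto reader = RNTupleReader::Open("/foo/ntpl", path);
   reader->LoadEntry(0);
}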
EXPECT_EQ(59U, pageRef.Get().GetGlobalRangeLast()); EXPECT_EQ(10U, pageRef.Get().GetClusterRangeFirst()); EXPECT_EQ(19U, pageRef.Get().GetClusterRangeLast()); - auto pageRef2 = pool.GetPage(1, ROOT::Experimental::RClusterIndex(0, 15)); + auto pageRef2 = pool.GetPage(1, std::type_index(typeid(void)), ROOT::Experimental::RClusterIndex(0, 15)); + EXPECT_TRUE(pageRef2.Get().IsNull()); + pageRef2 = pool.GetPage(1, std::type_index(typeid(int)), ROOT::Experimental::RClusterIndex(2, 15)); EXPECT_TRUE(pageRef2.Get().IsNull()); - pageRef2 = pool.GetPage(1, ROOT::Experimental::RClusterIndex(2, 15)); + pageRef2 = pool.GetPage(1, std::type_index(typeid(void)), ROOT::Experimental::RClusterIndex(2, 15)); EXPECT_FALSE(pageRef2.Get().IsNull()); } } - auto pageRef = pool.GetPage(1, 55); + auto pageRef = pool.GetPage(1, std::type_index(typeid(void)), 55); EXPECT_TRUE(pageRef.Get().IsNull()); } diff --git a/tree/ntuple/v7/test/ntuple_storage.cxx b/tree/ntuple/v7/test/ntuple_storage.cxx index b93a23263cd40..26f229f424197 100644 --- a/tree/ntuple/v7/test/ntuple_storage.cxx +++ b/tree/ntuple/v7/test/ntuple_storage.cxx @@ -487,7 +487,7 @@ TEST(RNTuple, WritePageBudget) #ifdef R__HAS_DAVIX TEST(RNTuple, OpenHTTP) { - std::unique_ptr file(TFile::Open("http://root.cern/files/tutorials/ntpl004_dimuon_v1rc3.root")); + std::unique_ptr file(TFile::Open("http://root.cern/files/tutorials/ntpl004_dimuon_v1.root")); auto Events = std::unique_ptr(file->Get("Events")); auto model = RNTupleModel::Create(); model->MakeField>("nMuon"); diff --git a/tree/ntuple/v7/test/ntuple_storage_daos.cxx b/tree/ntuple/v7/test/ntuple_storage_daos.cxx index 40201fdd8842c..4a49da6699fff 100644 --- a/tree/ntuple/v7/test/ntuple_storage_daos.cxx +++ b/tree/ntuple/v7/test/ntuple_storage_daos.cxx @@ -26,7 +26,6 @@ class RPageStorageDaos : public ::testing::Test { // Initialized at the start of each test to expect diagnostic messages from TestSupport fRootDiags.optionalDiag(kWarning, "ROOT::Experimental::Internal::RPageSinkDaos::RPageSinkDaos", "The DAOS backend is experimental and still under development.", false); - fRootDiags.optionalDiag(kWarning, "[ROOT.NTuple]", "Pre-release format version: RC 3", false); fRootDiags.optionalDiag(kWarning, "in int daos_init()", "This RNTuple build uses libdaos_mock. Use only for testing!"); } diff --git a/tree/ntuple/v7/test/ntuple_view.cxx b/tree/ntuple/v7/test/ntuple_view.cxx index 2c5c07e9bb967..4b4fdeb2d592d 100644 --- a/tree/ntuple/v7/test/ntuple_view.cxx +++ b/tree/ntuple/v7/test/ntuple_view.cxx @@ -57,6 +57,73 @@ TEST(RNTuple, View) EXPECT_EQ(3, n); } +TEST(RNTuple, CollectionView) +{ + FileRaii fileGuard("test_ntuple_collection_view.root"); + + { + auto model = RNTupleModel::Create(); + auto fieldJets = model->MakeField>("jets"); + *fieldJets = {1, 2, 3}; + + auto writer = RNTupleWriter::Recreate(std::move(model), "myNTuple", fileGuard.GetPath()); + writer->Fill(); + *fieldJets = {4, 5}; + writer->Fill(); + writer->CommitCluster(); + fieldJets->clear(); + writer->Fill(); + *fieldJets = {6, 7, 8, 9}; + writer->Fill(); + } + + auto reader = RNTupleReader::Open("myNTuple", fileGuard.GetPath()); + ASSERT_EQ(4, reader->GetNEntries()); + auto viewJets = reader->GetCollectionView("jets"); + auto viewJetsItems = viewJets.GetView("_0"); + + // The call operator returns the size of the collection. + EXPECT_EQ(3, viewJets(0)); + EXPECT_EQ(2, viewJets(1)); + EXPECT_EQ(0, viewJets(2)); + EXPECT_EQ(4, viewJets(3)); + EXPECT_EQ(4, viewJets(RClusterIndex(1, 1))); + + // Via the collection range, we can get the items. 
+ auto range = viewJets.GetCollectionRange(1); + EXPECT_EQ(2, range.size()); + EXPECT_EQ(RClusterIndex(0, 3), *range.begin()); + EXPECT_EQ(RClusterIndex(0, 5), *range.end()); + + std::int32_t expected = 4; + for (auto &&index : range) { + EXPECT_EQ(expected, viewJetsItems(index)); + expected++; + } + + // The same items can be bulk-read. + auto bulk = viewJetsItems.CreateBulk(); + auto mask = std::make_unique(range.size()); + std::fill(mask.get(), mask.get() + range.size(), true); + std::int32_t *values = static_cast(bulk.ReadBulk(*range.begin(), mask.get(), range.size())); + for (std::size_t i = 0; i < range.size(); i++) { + EXPECT_EQ(i + 4, values[i]); + } + + // The pointer can be adopted by an RVec. + ROOT::RVec v(values, range.size()); + ROOT::RVec expectedV = {4, 5}; + EXPECT_TRUE(ROOT::VecOps::All(expectedV == v)); + + // Bulk reading can also adopt a provided buffer. + auto buffer = std::make_unique(range.size()); + bulk.AdoptBuffer(buffer.get(), range.size()); + bulk.ReadBulk(*range.begin(), mask.get(), range.size()); + for (std::size_t i = 0; i < range.size(); i++) { + EXPECT_EQ(i + 4, buffer[i]); + } +} + TEST(RNTuple, ViewCast) { FileRaii fileGuard("test_ntuple_view_cast.root"); @@ -455,9 +522,10 @@ TEST(RNTuple, ViewFieldIteration) auto viewArray = reader->GetView("array"); EXPECT_EQ(1u, viewArray.GetFieldRange().size()); + auto viewEmpty = reader->GetView("empty"); try { - auto viewEmpty = reader->GetView("empty"); - FAIL() << "creating a view on an empty field should throw"; + viewEmpty.GetFieldRange(); + FAIL() << "accessing the field range of a view on an empty field should throw"; } catch (const RException &err) { EXPECT_THAT(err.what(), testing::HasSubstr("field iteration over empty fields is unsupported")); } diff --git a/tree/ntuple/v7/test/rfield_check.cxx b/tree/ntuple/v7/test/rfield_basics.cxx similarity index 72% rename from tree/ntuple/v7/test/rfield_check.cxx rename to tree/ntuple/v7/test/rfield_basics.cxx index 1a9674770efb6..3d4dd20e8e882 100644 --- a/tree/ntuple/v7/test/rfield_check.cxx +++ b/tree/ntuple/v7/test/rfield_basics.cxx @@ -1,9 +1,4 @@ -#include - -#include "CustomStruct.hxx" - -#include "gmock/gmock.h" -#include "gtest/gtest.h" +#include "ntuple_test.hxx" class NoDict {}; @@ -47,3 +42,22 @@ TEST(RField, Check) EXPECT_EQ("long double", report[0].fTypeName); EXPECT_THAT(report[0].fErrMsg, testing::HasSubstr("unknown type")); } + +TEST(RField, ValidNaming) +{ + try { + RFieldBase::Create("x.y", "float").Unwrap(); + FAIL() << "creating a field with an invalid name should throw"; + } catch (const RException &err) { + EXPECT_THAT(err.what(), testing::HasSubstr("name 'x.y' cannot contain character '.'")); + } + + auto field = RFieldBase::Create("x", "float").Unwrap(); + + try { + field->Clone("x.y"); + FAIL() << "cloning a field with an invalid name should throw"; + } catch (const RException &err) { + EXPECT_THAT(err.what(), testing::HasSubstr("name 'x.y' cannot contain character '.'")); + } +} diff --git a/tree/ntuple/v7/test/rfield_class.cxx b/tree/ntuple/v7/test/rfield_class.cxx index 832649111754b..532d867f686c4 100644 --- a/tree/ntuple/v7/test/rfield_class.cxx +++ b/tree/ntuple/v7/test/rfield_class.cxx @@ -233,8 +233,6 @@ TEST(RNTuple, TClassReadRules) { ROOT::TestSupport::CheckDiagsRAII diags; diags.requiredDiag(kWarning, "[ROOT.NTuple]", "ignoring I/O customization rule with non-transient member: a", false); - diags.optionalDiag(kWarning, "[ROOT.NTuple]", "The RNTuple file format will change.", false); - diags.optionalDiag(kWarning, "[ROOT.NTuple]", 
"Pre-release format version: RC 2", false); FileRaii fileGuard("test_ntuple_tclassrules.root"); char c[4] = {'R', 'O', 'O', 'T'}; diff --git a/tree/ntuple/v7/test/rfield_vector.cxx b/tree/ntuple/v7/test/rfield_vector.cxx index bddca9476faef..c7e72259aabb1 100644 --- a/tree/ntuple/v7/test/rfield_vector.cxx +++ b/tree/ntuple/v7/test/rfield_vector.cxx @@ -121,7 +121,7 @@ TEST(RNTuple, InsideCollection) ASSERT_NE(idKlass, ROOT::Experimental::kInvalidDescriptorId); auto idA = source->GetSharedDescriptorGuard()->FindFieldId("a", idKlass); ASSERT_NE(idA, ROOT::Experimental::kInvalidDescriptorId); - auto fieldInner = std::unique_ptr(RFieldBase::Create("klassVec.a", "float").Unwrap()); + auto fieldInner = std::unique_ptr(RFieldBase::Create("klassVec_a", "float").Unwrap()); fieldInner->SetOnDiskId(idA); auto field = std::make_unique("klassVec", std::move(fieldInner)); diff --git a/tree/ntupleutil/v7/src/RNTupleInspector.cxx b/tree/ntupleutil/v7/src/RNTupleInspector.cxx index f22fb95e12285..e9e58193b783d 100644 --- a/tree/ntupleutil/v7/src/RNTupleInspector.cxx +++ b/tree/ntupleutil/v7/src/RNTupleInspector.cxx @@ -248,7 +248,7 @@ void ROOT::Experimental::RNTupleInspector::PrintColumnTypeInfo(ENTupleInspectorP output << " column type | count | # elements | compressed bytes | uncompressed bytes\n" << "----------------|---------|-----------------|-------------------|--------------------" << std::endl; for (const auto &[colType, typeInfo] : colTypeInfo) { - output << std::setw(15) << Internal::RColumnElementBase::GetTypeName(colType) << " |" << std::setw(8) + output << std::setw(15) << Internal::RColumnElementBase::GetColumnTypeName(colType) << " |" << std::setw(8) << typeInfo.count << " |" << std::setw(16) << typeInfo.nElems << " |" << std::setw(18) << typeInfo.compressedSize << " |" << std::setw(18) << typeInfo.uncompressedSize << " " << std::endl; } @@ -256,8 +256,8 @@ void ROOT::Experimental::RNTupleInspector::PrintColumnTypeInfo(ENTupleInspectorP case ENTupleInspectorPrintFormat::kCSV: output << "columnType,count,nElements,compressedSize,uncompressedSize" << std::endl; for (const auto &[colType, typeInfo] : colTypeInfo) { - output << Internal::RColumnElementBase::GetTypeName(colType) << "," << typeInfo.count << "," << typeInfo.nElems - << "," << typeInfo.compressedSize << "," << typeInfo.uncompressedSize << std::endl; + output << Internal::RColumnElementBase::GetColumnTypeName(colType) << "," << typeInfo.count << "," + << typeInfo.nElems << "," << typeInfo.compressedSize << "," << typeInfo.uncompressedSize << std::endl; } break; default: throw RException(R__FAIL("Invalid print format")); @@ -300,7 +300,7 @@ ROOT::Experimental::RNTupleInspector::GetColumnTypeInfoAsHist(ROOT::Experimental default: throw RException(R__FAIL("Unknown histogram type")); } - hist->AddBinContent(hist->GetXaxis()->FindBin(Internal::RColumnElementBase::GetTypeName(colInfo.GetType())), + hist->AddBinContent(hist->GetXaxis()->FindBin(Internal::RColumnElementBase::GetColumnTypeName(colInfo.GetType())), data); } @@ -322,10 +322,10 @@ ROOT::Experimental::RNTupleInspector::GetPageSizeDistribution(ROOT::Experimental std::string histName, std::string histTitle, size_t nBins) { if (histName.empty()) - histName = "pageSizeHistCol" + std::string{Internal::RColumnElementBase::GetTypeName(colType)}; + histName = "pageSizeHistCol" + std::string{Internal::RColumnElementBase::GetColumnTypeName(colType)}; if (histTitle.empty()) histTitle = "Page size distribution for columns with type " + - 
std::string{Internal::RColumnElementBase::GetTypeName(colType)}; + std::string{Internal::RColumnElementBase::GetColumnTypeName(colType)}; auto perTypeHist = GetPageSizeDistribution({colType}, histName, histTitle, nBins); @@ -415,8 +415,8 @@ std::unique_ptr ROOT::Experimental::RNTupleInspector::GetPageSizeDistri for (const auto &[colType, pageSizesForColType] : pageSizes) { auto hist = std::make_unique( - TString::Format("%s%s", histName.c_str(), Internal::RColumnElementBase::GetTypeName(colType)), - Internal::RColumnElementBase::GetTypeName(colType), nBins, histMin, + TString::Format("%s%s", histName.c_str(), Internal::RColumnElementBase::GetColumnTypeName(colType)), + Internal::RColumnElementBase::GetColumnTypeName(colType), nBins, histMin, histMax + ((histMax - histMin) / static_cast(nBins))); for (const auto pageSize : pageSizesForColType) { diff --git a/tree/tree/src/TChain.cxx b/tree/tree/src/TChain.cxx index 2533490418e3e..afa8282e18764 100644 --- a/tree/tree/src/TChain.cxx +++ b/tree/tree/src/TChain.cxx @@ -985,7 +985,18 @@ Long64_t TChain::GetEntries() const return fProofChain->GetEntries(); } if (fEntries == TTree::kMaxEntries) { - const_cast(this)->LoadTree(TTree::kMaxEntries-1); + // If the following is true, we are within a recursion about friend, + // and `LoadTree` will be no-op. + if (kLoadTree & fFriendLockStatus) + return fEntries; + const auto readEntry = fReadEntry; + auto *thisChain = const_cast(this); + thisChain->LoadTree(TTree::kMaxEntries - 1); + thisChain->InvalidateCurrentTree(); + if (readEntry >= 0) + thisChain->LoadTree(readEntry); + else + thisChain->fReadEntry = readEntry; } return fEntries; } diff --git a/tree/treeplayer/inc/TTreeReaderGenerator.h b/tree/treeplayer/inc/TTreeReaderGenerator.h index ff8cedaeca6b7..ed1dade64517d 100644 --- a/tree/treeplayer/inc/TTreeReaderGenerator.h +++ b/tree/treeplayer/inc/TTreeReaderGenerator.h @@ -33,6 +33,8 @@ class TLeaf; namespace ROOT { namespace Internal { + TString GetCppName(TString name); + /// 0 for the general case, 1 when this a split clases inside a TClonesArray, /// 2 when this is a split classes inside an STL container. enum ELocation { kOut=0, kClones, kSTL }; diff --git a/tree/treeplayer/src/TTreeIndex.cxx b/tree/treeplayer/src/TTreeIndex.cxx index 15d6f248012e0..db0db91c6f73c 100644 --- a/tree/treeplayer/src/TTreeIndex.cxx +++ b/tree/treeplayer/src/TTreeIndex.cxx @@ -345,7 +345,7 @@ Long64_t TTreeIndex::GetEntryNumberFriend(const TTree *parent) if (!parent) return -3; // We reached the end of the parent tree Long64_t pentry = parent->GetReadEntry(); - if (pentry >= parent->GetEntries()) + if (pentry >= parent->GetEntriesFast()) return -2; GetMajorFormulaParent(parent); GetMinorFormulaParent(parent); diff --git a/tree/treeplayer/src/TTreePlayer.cxx b/tree/treeplayer/src/TTreePlayer.cxx index a1b33a3e13106..036153641c6c7 100644 --- a/tree/treeplayer/src/TTreePlayer.cxx +++ b/tree/treeplayer/src/TTreePlayer.cxx @@ -733,17 +733,20 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) opt.ToLower(); // Connect output files - if (!classname) classname = fTree->GetName(); + const TString fileNameStem = classname ? classname : fTree->GetName(); + const TString cppClassName = ROOT::Internal::GetCppName(fileNameStem); + if (cppClassName != fileNameStem) + Warning("TTreePlayer::MakeClass", "The %s name provided ('%s') is not a valid C++ identifier and will be converted to '%s'.",(classname ? 
"class" : "tree"), fileNameStem.Data(), cppClassName.Data()); TString thead; - thead.Form("%s.h", classname); + thead.Form("%s.h", fileNameStem.Data()); FILE *fp = fopen(thead, "w"); if (!fp) { Error("MakeClass","cannot open output file %s", thead.Data()); return 3; } TString tcimp; - tcimp.Form("%s.C", classname); + tcimp.Form("%s.C", fileNameStem.Data()); FILE *fpc = fopen(tcimp, "w"); if (!fpc) { Error("MakeClass","cannot open output file %s", tcimp.Data()); @@ -780,8 +783,8 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) } fprintf(fp,"//////////////////////////////////////////////////////////\n"); fprintf(fp,"\n"); - fprintf(fp,"#ifndef %s_h\n",classname); - fprintf(fp,"#define %s_h\n",classname); + fprintf(fp,"#ifndef %s_h\n",cppClassName.Data()); + fprintf(fp,"#define %s_h\n",cppClassName.Data()); fprintf(fp,"\n"); fprintf(fp,"#include \n"); fprintf(fp,"#include \n"); @@ -858,11 +861,11 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) fprintf(fp,"\n"); if (opt.Contains("selector")) { - fprintf(fp,"class %s : public TSelector {\n",classname); + fprintf(fp,"class %s : public TSelector {\n",cppClassName.Data()); fprintf(fp,"public :\n"); fprintf(fp," TTree *fChain; //!pointer to the analyzed TTree or TChain\n"); } else { - fprintf(fp,"class %s {\n",classname); + fprintf(fp,"class %s {\n",cppClassName.Data()); fprintf(fp,"public :\n"); fprintf(fp," TTree *fChain; //!pointer to the analyzed TTree or TChain\n"); fprintf(fp," Int_t fCurrent; //!current Tree number in a TChain\n"); @@ -975,11 +978,11 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) leafStatus[l] = 0; } if (bre->GetType() == 3 || bre->GetType() == 4) { - fprintf(fp," %-15s %s_;\n","Int_t", branchname); + fprintf(fp," %-15s %s_;\n","Int_t", ROOT::Internal::GetCppName(branchname).Data()); continue; } if (bre->IsBranchFolder()) { - fprintf(fp," %-15s *%s;\n",bre->GetClassName(), branchname); + fprintf(fp," %-15s *%s;\n",bre->GetClassName(), ROOT::Internal::GetCppName(branchname).Data()); mustInit.Add(bre); continue; } else { @@ -987,16 +990,16 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) } if (bre->GetStreamerType() < 0) { if (branch->GetListOfBranches()->GetEntriesFast()) { - fprintf(fp,"%s%-15s *%s;\n",headcom,bre->GetClassName(), branchname); + fprintf(fp,"%s%-15s *%s;\n",headcom,bre->GetClassName(), ROOT::Internal::GetCppName(branchname).Data()); } else { - fprintf(fp,"%s%-15s *%s;\n",head,bre->GetClassName(), branchname); + fprintf(fp,"%s%-15s *%s;\n",head,bre->GetClassName(), ROOT::Internal::GetCppName(branchname).Data()); mustInit.Add(bre); } continue; } if (bre->GetStreamerType() == 0) { if (!TClass::GetClass(bre->GetClassName())->HasInterpreterInfo()) {leafStatus[l] = 1; head = headcom;} - fprintf(fp,"%s%-15s *%s;\n",head,bre->GetClassName(), branchname); + fprintf(fp,"%s%-15s *%s;\n",head,bre->GetClassName(), ROOT::Internal::GetCppName(branchname).Data()); if (leafStatus[l] == 0) mustInit.Add(bre); continue; } @@ -1016,11 +1019,11 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) if (elem->IsA() == TStreamerBase::Class()) {leafStatus[l] = 1; continue;} if (!TClass::GetClass(elem->GetTypeName())) {leafStatus[l] = 1; continue;} if (!TClass::GetClass(elem->GetTypeName())->HasInterpreterInfo()) {leafStatus[l] = 1; head = headcom;} - if (leafcount) fprintf(fp,"%s%-15s %s[kMax%s];\n",head,elem->GetTypeName(), branchname,blen); - else fprintf(fp,"%s%-15s %s;\n",head,elem->GetTypeName(), 
branchname); + if (leafcount) fprintf(fp,"%s%-15s %s[kMax%s];\n",head,elem->GetTypeName(), ROOT::Internal::GetCppName(branchname).Data(),blen); + else fprintf(fp,"%s%-15s %s;\n",head,elem->GetTypeName(), ROOT::Internal::GetCppName(branchname).Data()); } else { if (!TClass::GetClass(bre->GetClassName())->HasInterpreterInfo()) {leafStatus[l] = 1; head = headcom;} - fprintf(fp,"%s%-15s %s;\n",head,bre->GetClassName(), branchname); + fprintf(fp,"%s%-15s %s;\n",head,bre->GetClassName(), ROOT::Internal::GetCppName(branchname).Data()); } continue; } @@ -1069,12 +1072,12 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) } if (dimensions.Length()) { if (kmax) fprintf(fp," %-14s %s%s[kMax%s]%s; //[%s]\n",leaf->GetTypeName(), stars, - branchname,blen,dimensions.Data(),leafcountName); + ROOT::Internal::GetCppName(branchname).Data(),blen,dimensions.Data(),leafcountName); else fprintf(fp," %-14s %s%s[%d]%s; //[%s]\n",leaf->GetTypeName(), stars, - branchname,len,dimensions.Data(),leafcountName); + ROOT::Internal::GetCppName(branchname).Data(),len,dimensions.Data(),leafcountName); } else { - if (kmax) fprintf(fp," %-14s %s%s[kMax%s]; //[%s]\n",leaf->GetTypeName(), stars, branchname,blen,leafcountName); - else fprintf(fp," %-14s %s%s[%d]; //[%s]\n",leaf->GetTypeName(), stars, branchname,len,leafcountName); + if (kmax) fprintf(fp," %-14s %s%s[kMax%s]; //[%s]\n",leaf->GetTypeName(), stars, ROOT::Internal::GetCppName(branchname).Data(),blen,leafcountName); + else fprintf(fp," %-14s %s%s[%d]; //[%s]\n",leaf->GetTypeName(), stars, ROOT::Internal::GetCppName(branchname).Data(),len,leafcountName); } if (stars[0]=='*') { TNamed *n; @@ -1084,10 +1087,10 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) } } else { if (strstr(branchname,"[")) len = 1; - if (len < 2) fprintf(fp," %-15s %s;\n",leaf->GetTypeName(), branchname); + if (len < 2) fprintf(fp," %-15s %s;\n",leaf->GetTypeName(), ROOT::Internal::GetCppName(branchname).Data()); else { - if (twodim) fprintf(fp," %-15s %s%s;\n",leaf->GetTypeName(), branchname,(char*)strstr(leaf->GetTitle(),"[")); - else fprintf(fp," %-15s %s[%d];\n",leaf->GetTypeName(), branchname,len); + if (twodim) fprintf(fp," %-15s %s%s;\n",leaf->GetTypeName(), ROOT::Internal::GetCppName(branchname).Data(),(char*)strstr(leaf->GetTitle(),"[")); + else fprintf(fp," %-15s %s[%d];\n",leaf->GetTypeName(), ROOT::Internal::GetCppName(branchname).Data(),len); } } } @@ -1104,8 +1107,8 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) // generate class member functions prototypes if (opt.Contains("selector")) { fprintf(fp,"\n"); - fprintf(fp," %s(TTree * /*tree*/ =0) : fChain(0) { }\n",classname) ; - fprintf(fp," ~%s() override { }\n",classname); + fprintf(fp," %s(TTree * /*tree*/ =0) : fChain(0) { }\n",cppClassName.Data()) ; + fprintf(fp," ~%s() override { }\n",cppClassName.Data()); fprintf(fp," Int_t Version() const override { return 2; }\n"); fprintf(fp," void Begin(TTree *tree) override;\n"); fprintf(fp," void SlaveBegin(TTree *tree) override;\n"); @@ -1119,15 +1122,15 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) fprintf(fp," TList* GetOutputList() const override { return fOutput; }\n"); fprintf(fp," void SlaveTerminate() override;\n"); fprintf(fp," void Terminate() override;\n\n"); - fprintf(fp," ClassDefOverride(%s,0);\n",classname); + fprintf(fp," ClassDefOverride(%s,0);\n",cppClassName.Data()); fprintf(fp,"};\n"); fprintf(fp,"\n"); fprintf(fp,"#endif\n"); fprintf(fp,"\n"); } else { 
fprintf(fp,"\n"); - fprintf(fp," %s(TTree *tree=0);\n",classname); - fprintf(fp," virtual ~%s();\n",classname); + fprintf(fp," %s(TTree *tree=0);\n",cppClassName.Data()); + fprintf(fp," virtual ~%s();\n",cppClassName.Data()); fprintf(fp," virtual Int_t Cut(Long64_t entry);\n"); fprintf(fp," virtual Int_t GetEntry(Long64_t entry);\n"); fprintf(fp," virtual Long64_t LoadTree(Long64_t entry);\n"); @@ -1141,9 +1144,9 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) fprintf(fp,"\n"); } // generate code for class constructor - fprintf(fp,"#ifdef %s_cxx\n",classname); + fprintf(fp,"#ifdef %s_cxx\n",cppClassName.Data()); if (!opt.Contains("selector")) { - fprintf(fp,"%s::%s(TTree *tree) : fChain(0) \n",classname,classname); + fprintf(fp,"%s::%s(TTree *tree) : fChain(0) \n",cppClassName.Data(),cppClassName.Data()); fprintf(fp,"{\n"); fprintf(fp,"// if parameter tree is not specified (or zero), connect the file\n"); fprintf(fp,"// used to generate this class and read the Tree.\n"); @@ -1199,7 +1202,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) // generate code for class destructor() if (!opt.Contains("selector")) { - fprintf(fp,"%s::~%s()\n",classname,classname); + fprintf(fp,"%s::~%s()\n",cppClassName.Data(),cppClassName.Data()); fprintf(fp,"{\n"); fprintf(fp," if (!fChain) return;\n"); if (isHbook) { @@ -1212,7 +1215,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) } // generate code for class member function GetEntry() if (!opt.Contains("selector")) { - fprintf(fp,"Int_t %s::GetEntry(Long64_t entry)\n",classname); + fprintf(fp,"Int_t %s::GetEntry(Long64_t entry)\n",cppClassName.Data()); fprintf(fp,"{\n"); fprintf(fp,"// Read contents of entry.\n"); @@ -1222,7 +1225,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) } // generate code for class member function LoadTree() if (!opt.Contains("selector")) { - fprintf(fp,"Long64_t %s::LoadTree(Long64_t entry)\n",classname); + fprintf(fp,"Long64_t %s::LoadTree(Long64_t entry)\n",cppClassName.Data()); fprintf(fp,"{\n"); fprintf(fp,"// Set the environment to read one entry\n"); fprintf(fp," if (!fChain) return -5;\n"); @@ -1238,7 +1241,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) } // generate code for class member function Init(), first pass = get branch pointer - fprintf(fp,"void %s::Init(TTree *tree)\n",classname); + fprintf(fp,"void %s::Init(TTree *tree)\n",cppClassName.Data()); fprintf(fp,"{\n"); fprintf(fp," // The Init() function is called when the selector needs to initialize\n" " // a new tree or chain. 
Typically here the branch addresses and branch\n" @@ -1267,7 +1270,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) if (*bname == '>') *bname='_'; bname++; } - fprintf(fp," %s = 0;\n",branchname ); + fprintf(fp," %s = 0;\n",ROOT::Internal::GetCppName(branchname).Data() ); } } if (mustInitArr.Last()) { @@ -1338,9 +1341,9 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) } if (leafcount) len = leafcount->GetMaximum()+1; if (len > 1) fprintf(fp,"%s fChain->SetBranchAddress(\"%s\", %s, &b_%s);\n", - maybedisable,branch->GetName(), branchname, R__GetBranchPointerName(leaf).Data()); + maybedisable,branch->GetName(), ROOT::Internal::GetCppName(branchname).Data(), R__GetBranchPointerName(leaf).Data()); else fprintf(fp,"%s fChain->SetBranchAddress(\"%s\", &%s, &b_%s);\n", - maybedisable,branch->GetName(), branchname, R__GetBranchPointerName(leaf).Data()); + maybedisable,branch->GetName(), ROOT::Internal::GetCppName(branchname).Data(), R__GetBranchPointerName(leaf).Data()); } //must call Notify in case of MakeClass if (!opt.Contains("selector")) { @@ -1351,7 +1354,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) fprintf(fp,"\n"); // generate code for class member function Notify() - fprintf(fp,"bool %s::Notify()\n",classname); + fprintf(fp,"bool %s::Notify()\n",cppClassName.Data()); fprintf(fp,"{\n"); fprintf(fp," // The Notify() function is called when a new file is opened. This\n" " // can be either for a new TTree in a TChain or when when a new TTree\n" @@ -1364,7 +1367,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) // generate code for class member function Show() if (!opt.Contains("selector")) { - fprintf(fp,"void %s::Show(Long64_t entry)\n",classname); + fprintf(fp,"void %s::Show(Long64_t entry)\n",cppClassName.Data()); fprintf(fp,"{\n"); fprintf(fp,"// Print contents of entry.\n"); fprintf(fp,"// If entry is not specified, print current entry\n"); @@ -1375,7 +1378,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) } // generate code for class member function Cut() if (!opt.Contains("selector")) { - fprintf(fp,"Int_t %s::Cut(Long64_t entry)\n",classname); + fprintf(fp,"Int_t %s::Cut(Long64_t entry)\n",cppClassName.Data()); fprintf(fp,"{\n"); fprintf(fp,"// This function may be called from Loop.\n"); fprintf(fp,"// returns 1 if entry is accepted.\n"); @@ -1384,22 +1387,22 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) fprintf(fp," return 1;\n"); fprintf(fp,"}\n"); } - fprintf(fp,"#endif // #ifdef %s_cxx\n",classname); + fprintf(fp,"#endif // #ifdef %s_cxx\n",cppClassName.Data()); //======================Generate classname.C===================== if (!opt.Contains("selector")) { // generate code for class member function Loop() - fprintf(fpc,"#define %s_cxx\n",classname); + fprintf(fpc,"#define %s_cxx\n",cppClassName.Data()); fprintf(fpc,"#include \"%s\"\n",thead.Data()); fprintf(fpc,"#include \n"); fprintf(fpc,"#include \n"); fprintf(fpc,"#include \n"); fprintf(fpc,"\n"); - fprintf(fpc,"void %s::Loop()\n",classname); + fprintf(fpc,"void %s::Loop()\n",cppClassName.Data()); fprintf(fpc,"{\n"); fprintf(fpc,"// In a ROOT session, you can do:\n"); - fprintf(fpc,"// root> .L %s.C\n",classname); - fprintf(fpc,"// root> %s t\n",classname); + fprintf(fpc,"// root> .L %s.C\n",fileNameStem.Data()); + fprintf(fpc,"// root> %s t\n",cppClassName.Data()); fprintf(fpc,"// root> t.GetEntry(12); // Fill t data members with entry number 12\n"); 
fprintf(fpc,"// root> t.Show(); // Show values of entry 12\n"); fprintf(fpc,"// root> t.Show(16); // Read and show values of entry 16\n"); @@ -1432,8 +1435,8 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) } if (opt.Contains("selector")) { // generate usage comments and list of includes - fprintf(fpc,"#define %s_cxx\n",classname); - fprintf(fpc,"// The class definition in %s.h has been generated automatically\n",classname); + fprintf(fpc,"#define %s_cxx\n",cppClassName.Data()); + fprintf(fpc,"// The class definition in %s.h has been generated automatically\n",fileNameStem.Data()); fprintf(fpc,"// by the ROOT utility TTree::MakeSelector(). This class is derived\n"); fprintf(fpc,"// from the ROOT class TSelector. For more information on the TSelector\n" "// framework see $ROOTSYS/README/README.SELECTOR or the ROOT User Manual.\n\n"); @@ -1451,9 +1454,9 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) fprintf(fpc,"//\n"); fprintf(fpc,"// To use this file, try the following session on your Tree T:\n"); fprintf(fpc,"//\n"); - fprintf(fpc,"// root> T->Process(\"%s.C\")\n",classname); - fprintf(fpc,"// root> T->Process(\"%s.C\",\"some options\")\n",classname); - fprintf(fpc,"// root> T->Process(\"%s.C+\")\n",classname); + fprintf(fpc,"// root> T->Process(\"%s.C\")\n",fileNameStem.Data()); + fprintf(fpc,"// root> T->Process(\"%s.C\",\"some options\")\n",fileNameStem.Data()); + fprintf(fpc,"// root> T->Process(\"%s.C+\")\n",fileNameStem.Data()); fprintf(fpc,"//\n\n"); fprintf(fpc,"#include \"%s\"\n",thead.Data()); fprintf(fpc,"#include \n"); @@ -1461,7 +1464,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) fprintf(fpc,"\n"); // generate code for class member function Begin fprintf(fpc,"\n"); - fprintf(fpc,"void %s::Begin(TTree * /*tree*/)\n",classname); + fprintf(fpc,"void %s::Begin(TTree * /*tree*/)\n",cppClassName.Data()); fprintf(fpc,"{\n"); fprintf(fpc," // The Begin() function is called at the start of the query.\n"); fprintf(fpc," // When running with PROOF Begin() is only called on the client.\n"); @@ -1472,7 +1475,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) fprintf(fpc,"}\n"); // generate code for class member function SlaveBegin fprintf(fpc,"\n"); - fprintf(fpc,"void %s::SlaveBegin(TTree * /*tree*/)\n",classname); + fprintf(fpc,"void %s::SlaveBegin(TTree * /*tree*/)\n",cppClassName.Data()); fprintf(fpc,"{\n"); fprintf(fpc," // The SlaveBegin() function is called after the Begin() function.\n"); fprintf(fpc," // When running with PROOF SlaveBegin() is called on each slave server.\n"); @@ -1483,7 +1486,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) fprintf(fpc,"}\n"); // generate code for class member function Process fprintf(fpc,"\n"); - fprintf(fpc,"bool %s::Process(Long64_t entry)\n",classname); + fprintf(fpc,"bool %s::Process(Long64_t entry)\n",cppClassName.Data()); fprintf(fpc,"{\n"); fprintf(fpc," // The Process() function is called for each entry in the tree (or possibly\n" " // keyed object in the case of PROOF) to be processed. 
The entry argument\n" @@ -1501,13 +1504,13 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) " //\n" " // Use fStatus to set the return value of TTree::Process().\n" " //\n" - " // The return value is currently not used.\n\n", classname); + " // The return value is currently not used.\n\n", cppClassName.Data()); fprintf(fpc,"\n"); fprintf(fpc," return true;\n"); fprintf(fpc,"}\n"); // generate code for class member function SlaveTerminate fprintf(fpc,"\n"); - fprintf(fpc,"void %s::SlaveTerminate()\n",classname); + fprintf(fpc,"void %s::SlaveTerminate()\n",cppClassName.Data()); fprintf(fpc,"{\n"); fprintf(fpc," // The SlaveTerminate() function is called after all entries or objects\n" " // have been processed. When running with PROOF SlaveTerminate() is called\n" @@ -1517,7 +1520,7 @@ Int_t TTreePlayer::MakeClass(const char *classname, const char *option) fprintf(fpc,"}\n"); // generate code for class member function Terminate fprintf(fpc,"\n"); - fprintf(fpc,"void %s::Terminate()\n",classname); + fprintf(fpc,"void %s::Terminate()\n",cppClassName.Data()); fprintf(fpc,"{\n"); fprintf(fpc," // The Terminate() function is the last function to be called during\n" " // a query. It always runs on the client, it can be used to present\n" diff --git a/tree/treeplayer/src/TTreeReaderGenerator.cxx b/tree/treeplayer/src/TTreeReaderGenerator.cxx index 2dd53a67e10ed..120889b640b64 100644 --- a/tree/treeplayer/src/TTreeReaderGenerator.cxx +++ b/tree/treeplayer/src/TTreeReaderGenerator.cxx @@ -31,10 +31,20 @@ #include "TObjString.h" #include "TVirtualCollectionProxy.h" #include "TVirtualStreamerInfo.h" +#include "TInterpreter.h" namespace ROOT { namespace Internal { + /** + * @brief Convert a valid TTree branch name or filename into a valid C++ variable name + * @param name a TString with the original name + * @return a TString with the converted name valid to use as C++ variable in a script + */ + TString GetCppName(TString name) { + return gInterpreter->MapCppName(name.Data()); + } + //////////////////////////////////////////////////////////////////////////////// /// Constructor. Analyzes the tree and writes selector. 
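For context, a minimal usage sketch of what these name-sanitization changes are meant to handle; the file and tree names below are illustrative, and the exact identifier produced is whatever TInterpreter::MapCppName returns:

#include "TFile.h"
#include "TTree.h"

// Illustrative sketch: generate analysis skeletons for a tree whose name is
// not a valid C++ identifier. The generated files keep the original name stem
// ("ntuple-2024.h" / "ntuple-2024.C"), while the class declared inside them
// uses the sanitized identifier, and a warning flags the conversion.
void makeSelectorFromOddName()
{
   TFile f("events.root");                   // assumed input file
   auto tree = f.Get<TTree>("ntuple-2024");  // '-' is not allowed in a C++ identifier
   if (!tree)
      return;
   tree->MakeSelector();                     // or tree->MakeClass() for the non-selector skeleton
}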
@@ -807,7 +817,11 @@ namespace Internal { //======================Generate classname.h===================== TString thead; - thead.Form("%s.h", fClassname.Data()); + const TString fileNameStem = fClassname; + const TString cppClassName = ROOT::Internal::GetCppName(fClassname); + if (cppClassName != fileNameStem) + Warning("TTreeReaderGenerator::WriteSelector", "The class name provided ('%s') is not a valid C++ identifier and will be converted to '%s', the code produced will likely fail to compile.", fileNameStem.Data(), cppClassName.Data()); + thead.Form("%s.h", fileNameStem.Data()); std::ofstream ofs (thead, std::ofstream::out); if (!ofs) { Error("WriteSelector","cannot open output file %s", thead.Data()); @@ -828,8 +842,8 @@ R"CODE(////////////////////////////////////////////////////////// ofs << R"CODE(////////////////////////////////////////////////////////// -#ifndef )CODE" << fClassname << R"CODE(_h -#define )CODE" << fClassname << R"CODE(_h +#ifndef )CODE" << cppClassName << R"CODE(_h +#define )CODE" << cppClassName << R"CODE(_h #include #include @@ -854,7 +868,7 @@ R"CODE(#include // Generate class declaration with TTreeReaderValues and Arrays ofs << -R"CODE(class )CODE" << fClassname << R"CODE( : public TSelector { +R"CODE(class )CODE" << cppClassName << R"CODE( : public TSelector { public : TTreeReader fReader; //!the tree reader TTree *fChain = 0; //!pointer to the analyzed TTree or TChain @@ -864,17 +878,18 @@ public : next = &fListOfReaders; TTreeReaderDescriptor *descriptor; while ( ( descriptor = (TTreeReaderDescriptor*)next() ) ) { + const TString validName = ROOT::Internal::GetCppName(descriptor->fName); ofs << " TTreeReader" << (descriptor->fType == TTreeReaderDescriptor::ReaderType::kValue ? "Value" : "Array") << "<" << descriptor->fDataType - << "> " << descriptor->fName + << "> " << validName << " = {fReader, \"" << descriptor->fBranchName << "\"};" << std::endl; } // Generate class member functions prototypes ofs << R"CODE( - )CODE" << fClassname << R"CODE((TTree * /*tree*/ =0) { } - ~)CODE" << fClassname << R"CODE(() override { } + )CODE" << cppClassName << R"CODE((TTree * /*tree*/ =0) { } + ~)CODE" << cppClassName << R"CODE(() override { } Int_t Version() const override { return 2; } void Begin(TTree *tree) override; void SlaveBegin(TTree *tree) override; @@ -889,14 +904,14 @@ R"CODE( void SlaveTerminate() override; void Terminate() override; - ClassDefOverride()CODE" << fClassname << R"CODE(,0); + ClassDefOverride()CODE" << cppClassName << R"CODE(,0); }; #endif -#ifdef )CODE" << fClassname << R"CODE(_cxx -void )CODE" << fClassname << R"CODE(::Init(TTree *tree) +#ifdef )CODE" << cppClassName << R"CODE(_cxx +void )CODE" << cppClassName << R"CODE(::Init(TTree *tree) { // The Init() function is called when the selector needs to initialize // a new tree or chain. Typically here the reader is initialized. @@ -908,7 +923,7 @@ void )CODE" << fClassname << R"CODE(::Init(TTree *tree) fReader.SetTree(tree); } -bool )CODE" << fClassname << R"CODE(::Notify() +bool )CODE" << cppClassName << R"CODE(::Notify() { // The Notify() function is called when a new file is opened. 
This // can be either for a new TTree in a TChain or when when a new TTree @@ -920,13 +935,13 @@ bool )CODE" << fClassname << R"CODE(::Notify() } -#endif // #ifdef )CODE" << fClassname << R"CODE(_cxx +#endif // #ifdef )CODE" << cppClassName << R"CODE(_cxx )CODE"; ofs.close(); //======================Generate classname.C===================== TString tcimp; - tcimp.Form("%s.C", fClassname.Data()); + tcimp.Form("%s.C", fileNameStem.Data()); std::ofstream ofsc (tcimp, std::ofstream::out); if (!ofsc) { Error("WriteSelector","cannot open output file %s", tcimp.Data()); @@ -934,8 +949,8 @@ bool )CODE" << fClassname << R"CODE(::Notify() } ofsc << -R"CODE(#define )CODE" << fClassname << R"CODE(_cxx -// The class definition in )CODE" << fClassname << R"CODE(.h has been generated automatically +R"CODE(#define )CODE" << cppClassName << R"CODE(_cxx +// The class definition in )CODE" << cppClassName << R"CODE(.h has been generated automatically // by the ROOT utility TTree::MakeSelector(). This class is derived // from the ROOT class TSelector. For more information on the TSelector // framework see $ROOTSYS/README/README.SELECTOR or the ROOT User Manual. @@ -965,7 +980,7 @@ R"CODE(#define )CODE" << fClassname << R"CODE(_cxx #include #include -void )CODE" << fClassname << R"CODE(::Begin(TTree * /*tree*/) +void )CODE" << cppClassName << R"CODE(::Begin(TTree * /*tree*/) { // The Begin() function is called at the start of the query. // When running with PROOF Begin() is only called on the client. @@ -974,7 +989,7 @@ void )CODE" << fClassname << R"CODE(::Begin(TTree * /*tree*/) TString option = GetOption(); } -void )CODE" << fClassname << R"CODE(::SlaveBegin(TTree * /*tree*/) +void )CODE" << cppClassName << R"CODE(::SlaveBegin(TTree * /*tree*/) { // The SlaveBegin() function is called after the Begin() function. // When running with PROOF SlaveBegin() is called on each slave server. @@ -984,7 +999,7 @@ void )CODE" << fClassname << R"CODE(::SlaveBegin(TTree * /*tree*/) } -bool )CODE" << fClassname << R"CODE(::Process(Long64_t entry) +bool )CODE" << cppClassName << R"CODE(::Process(Long64_t entry) { // The Process() function is called for each entry in the tree (or possibly // keyed object in the case of PROOF) to be processed. The entry argument @@ -1007,7 +1022,7 @@ bool )CODE" << fClassname << R"CODE(::Process(Long64_t entry) return true; } -void )CODE" << fClassname << R"CODE(::SlaveTerminate() +void )CODE" << cppClassName << R"CODE(::SlaveTerminate() { // The SlaveTerminate() function is called after all entries or objects // have been processed. When running with PROOF SlaveTerminate() is called @@ -1015,7 +1030,7 @@ void )CODE" << fClassname << R"CODE(::SlaveTerminate() } -void )CODE" << fClassname << R"CODE(::Terminate() +void )CODE" << cppClassName << R"CODE(::Terminate() { // The Terminate() function is the last function to be called during // a query. 
It always runs on the client, it can be used to present diff --git a/tree/treeplayer/test/CMakeLists.txt b/tree/treeplayer/test/CMakeLists.txt index 0f53d7644284b..4edf2fda38cfc 100644 --- a/tree/treeplayer/test/CMakeLists.txt +++ b/tree/treeplayer/test/CMakeLists.txt @@ -23,3 +23,5 @@ endif() ROOT_ADD_GTEST(ttreeindex_clone ttreeindex_clone.cxx LIBRARIES TreePlayer) ROOT_ADD_GTEST(ttreereader_friends ttreereader_friends.cxx LIBRARIES TreePlayer) + +ROOT_ADD_GTEST(ttreeindex_getlistoffriends ttreeindex_getlistoffriends.cxx LIBRARIES TreePlayer) diff --git a/tree/treeplayer/test/ttreeindex_getlistoffriends.cxx b/tree/treeplayer/test/ttreeindex_getlistoffriends.cxx new file mode 100644 index 0000000000000..4685c93409c61 --- /dev/null +++ b/tree/treeplayer/test/ttreeindex_getlistoffriends.cxx @@ -0,0 +1,114 @@ +#include "TBranch.h" +#include "TChain.h" +#include "TCollection.h" // TRangeDynCast +#include "TFile.h" +#include "TFriendElement.h" +#include "TObjArray.h" +#include "TTree.h" + +#include "gtest/gtest.h" + +#include +#include +#include + +void write_data(std::string_view treename, std::string_view filename) +{ + TFile f{filename.data(), "update"}; + TTree t{treename.data(), treename.data()}; + int runNumber{}; + int eventNumber{}; + float val{}; + t.Branch("runNumber", &runNumber, "runNumber/I"); + t.Branch("eventNumber", &eventNumber, "eventNumber/I"); + t.Branch("val", &val, "val/F"); + if (treename == "main") { + for (auto rn = 0; rn < 3; rn++) { + runNumber = rn; + for (auto en = 0; en < 5; en++) { + eventNumber = en; + val = en * rn; + t.Fill(); + } + } + } else { + for (auto rn = 0; rn < 3; rn++) { + runNumber = rn; + for (auto en = 4; en >= 0; en--) { + eventNumber = en; + val = en * rn; + t.Fill(); + } + } + } + + f.Write(); +} + +struct TTreeIndexGH_17820 : public ::testing::Test { + constexpr static auto fFileName{"ttreeindex_getlistoffriends_gh_17820.root"}; + constexpr static auto fMainName{"main"}; + constexpr static auto fFriendName{"friend"}; + + static void SetUpTestCase() + { + write_data(fMainName, fFileName); + write_data(fFriendName, fFileName); + } + + static void TearDownTestCase() { std::remove(fFileName); } +}; + +void expect_branch_names(const TObjArray *branches, const std::vector &branchNames) +{ + auto nBranchNames = branchNames.size(); + decltype(nBranchNames) nBranches{}; + for (const auto *br : TRangeDynCast(branches)) { + EXPECT_STREQ(br->GetName(), branchNames[nBranches].c_str()); + nBranches++; + } + EXPECT_EQ(nBranches, nBranchNames); +} + +// Regression test for https://github.com/root-project/root/issues/17820 +TEST_F(TTreeIndexGH_17820, RunTest) +{ + TChain mainChain{fMainName}; + mainChain.AddFile(fFileName); + + TChain friendChain{fFriendName}; + friendChain.AddFile(fFileName); + friendChain.BuildIndex("runNumber", "eventNumber"); + + mainChain.AddFriend(&friendChain); + + // Calling GetEntries used to mess with the fTree data member of the main + // chain, not connecting it to the friend chain and thus losing the list + // of friends. This in turn corrupted the list of branches. 
+ mainChain.GetEntries(); + + const auto *listOfBranches = mainChain.GetListOfBranches(); + ASSERT_TRUE(listOfBranches); + + const std::vector expectedNames{"runNumber", "eventNumber", "val"}; + + expect_branch_names(listOfBranches, expectedNames); + + const auto *curTree = mainChain.GetTree(); + ASSERT_TRUE(curTree); + + const auto *listOfFriends = mainChain.GetTree()->GetListOfFriends(); + ASSERT_TRUE(listOfFriends); + EXPECT_EQ(listOfFriends->GetEntries(), 1); + + auto *friendTree = dynamic_cast(dynamic_cast(listOfFriends->At(0))->GetTree()); + ASSERT_TRUE(friendTree); + + EXPECT_STREQ(friendTree->GetName(), fFriendName); + const auto *friendFile = friendTree->GetCurrentFile(); + ASSERT_TRUE(friendFile); + EXPECT_STREQ(friendFile->GetName(), fFileName); + + const auto *friendBranches = friendTree->GetListOfBranches(); + expect_branch_names(friendBranches, expectedNames); +} diff --git a/tutorials/CMakeLists.txt b/tutorials/CMakeLists.txt index 9258a04636d54..4ff603360eac3 100644 --- a/tutorials/CMakeLists.txt +++ b/tutorials/CMakeLists.txt @@ -44,6 +44,9 @@ set(TUTORIAL_ENV ${ROOT_environ} OMP_NUM_THREADS=1 OPENBLAS_NUM_THREADS=1 MKL_NU #---Copy the CTestCustom.cmake file into the build directory-------- configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake ${CMAKE_CURRENT_BINARY_DIR} COPYONLY) +#---Copy the input file of the quadp portfolio example to the build directory-------- +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/quadp/stock.root ${CMAKE_CURRENT_BINARY_DIR} COPYONLY) + #---Provide a rootlogon.C file in the current build directory that # will affect the way we run all tutorials. # This overwrites the existing rootlogon.C and rootalias.C in the @@ -259,6 +262,8 @@ endif() if(MSVC) #---Multiproc is not supported on Windows set(imt_veto ${imt_veto} multicore/mp*.C multicore/mtbb201_parallelHistoFill.C) + #---XRootD is not supported on Windows + set(imt_veto ${imt_veto} multicore/imt101_parTreeProcessing.C) endif() if(ROOT_CLASSIC_BUILD) @@ -376,7 +381,7 @@ if(root7) list(APPEND root7_veto rcanvas/df105.py) endif() if(MSVC AND NOT win_broken_tests) - #---EOS is not supported on Windows + #---XRootD is not supported on Windows list(APPEND root7_veto rcanvas/df104.py) list(APPEND root7_veto rcanvas/df105.py) list(APPEND root7_veto rcanvas/rbox.py) @@ -421,6 +426,13 @@ set(extra_veto eve7/*.C r/rootlogon.C) +if(MSVC) + # disable run_h1analysis.C because of Endpoint Security HTTP traffic scanning, + # which is corrupting the data on Windows + list(APPEND extra_veto tree/run_h1analysis.C) + list(APPEND extra_veto legacy/th2polyEurope.C) # needs to download from the web +endif() + if(MSVC AND NOT llvm13_broken_tests) list(APPEND extra_veto math/exampleFunction.py @@ -645,6 +657,13 @@ if (PY_SONNET_FOUND AND PY_GRAPH_NETS_FOUND) set (tmva-TMVA_SOFIE_GNN_Application-depends tutorial-tmva-TMVA_SOFIE_GNN_Parser) endif() +# Download open data csv only once from a python tutorial, the C++ ones depend on it +set(dataframe-df014_CSVDataSource-depends tutorial-dataframe-df014_CSVDataSource-py) +set(dataframe-df015_LazyDataSource-depends tutorial-dataframe-df014_CSVDataSource-py) + +# Download the input root file only once from a python tutorial, the C++ one depends on it +set(hist-th2polyUSA-depends tutorial-hist-th2polyUSA-py) + #---Loop over all tutorials and define the corresponding test--------- foreach(t ${tutorials}) list(FIND returncode_1 ${t} index) @@ -925,11 +944,17 @@ if(ROOT_pyroot_FOUND) endif() endforeach() + if(NOT ${${tname}-depends} STREQUAL ${tutorial_name}) + 
set(tutorial_dependency ${${tname}-depends}) + else() + set(tutorial_dependency "") + endif() + ROOT_ADD_TEST(${tutorial_name} COMMAND ${Python3_EXECUTABLE} ${setThreadPoolSize} ${thisTestPoolSize} ${CMAKE_CURRENT_SOURCE_DIR}/${t} PASSRC ${rc} FAILREGEX "Error in" ": error:" "segmentation violation" LABELS ${labels} - DEPENDS ${${tname}-depends} + DEPENDS ${tutorial_dependency} ENVIRONMENT ${TUTORIAL_ENV} PYTHON_DEPS ${python_deps} ${py_will_fail}) diff --git a/tutorials/dataframe/df014_CSVDataSource.C b/tutorials/dataframe/df014_CSVDataSource.C index c01ae1f03ee70..8e7a039db6de9 100644 --- a/tutorials/dataframe/df014_CSVDataSource.C +++ b/tutorials/dataframe/df014_CSVDataSource.C @@ -24,7 +24,7 @@ int df014_CSVDataSource() // Let's first create a RDF that will read from the CSV file. // The types of the columns will be automatically inferred. auto fileNameUrl = "http://root.cern/files/tutorials/df014_CsvDataSource_MuRun2010B.csv"; - auto fileName = "df014_CsvDataSource_MuRun2010B_cpp.csv"; + auto fileName = "CsvDataSource_MuRun2010B.csv"; if(gSystem->AccessPathName(fileName)) TFile::Cp(fileNameUrl, fileName); auto df = ROOT::RDF::FromCSV(fileName); diff --git a/tutorials/dataframe/df014_CSVDataSource.py b/tutorials/dataframe/df014_CSVDataSource.py index 54bddcb5972b0..28fdb9337900c 100644 --- a/tutorials/dataframe/df014_CSVDataSource.py +++ b/tutorials/dataframe/df014_CSVDataSource.py @@ -20,14 +20,15 @@ ## \author Enric Tejedor (CERN) import ROOT +import urllib.request import os # Let's first create a RDF that will read from the CSV file. # The types of the columns will be automatically inferred. fileNameUrl = "http://root.cern/files/tutorials/df014_CsvDataSource_MuRun2010B.csv" -fileName = "df014_CsvDataSource_MuRun2010B_py.csv" +fileName = "CsvDataSource_MuRun2010B.csv" if not os.path.isfile(fileName): - ROOT.TFile.Cp(fileNameUrl, fileName) + urllib.request.urlretrieve(fileNameUrl, fileName) df = ROOT.RDF.FromCSV(fileName) diff --git a/tutorials/dataframe/df015_LazyDataSource.C b/tutorials/dataframe/df015_LazyDataSource.C index 93005dbe4fe44..f9e8fee18d9b3 100644 --- a/tutorials/dataframe/df015_LazyDataSource.C +++ b/tutorials/dataframe/df015_LazyDataSource.C @@ -26,7 +26,7 @@ int df015_LazyDataSource() // Let's first create a RDF that will read from the CSV file. // See the tutorial (https://root.cern/doc/master/df014__CSVDataSource_8C.html) on CSV data sources for more details! 
auto fileNameUrl = "http://root.cern/files/tutorials/df014_CsvDataSource_MuRun2010B.csv"; - auto fileName = "df015_CsvDataSource_MuRun2010B.csv"; + auto fileName = "CsvDataSource_MuRun2010B.csv"; if(gSystem->AccessPathName(fileName)) TFile::Cp(fileNameUrl, fileName); diff --git a/tutorials/eve7/collection_proxies.C b/tutorials/eve7/collection_proxies.C index f1e61b8418091..2a93cdb02bdbe 100644 --- a/tutorials/eve7/collection_proxies.C +++ b/tutorials/eve7/collection_proxies.C @@ -131,8 +131,11 @@ public: { RCaloTower* tower = (RCaloTower*) fCollection->GetDataPtr(t); if (tower->fEta > cd.fEtaMin && tower->fEta < cd.fEtaMax && - tower->fPhi > cd.fPhiMin && tower->fPhi < cd.fPhiMax) - item_set.insert(t); + tower->fPhi > cd.fPhiMin && tower->fPhi < cd.fPhiMax && + fCollection->GetDataItem(t)->GetVisible()) + { + item_set.insert(t); + } } } REveSelection* sel = (REveSelection*)eveMng->FindElementById(selectionId); diff --git a/tutorials/eve7/event_demo.C b/tutorials/eve7/event_demo.C index 6881b8fe6483c..3708586596fd3 100644 --- a/tutorials/eve7/event_demo.C +++ b/tutorials/eve7/event_demo.C @@ -168,6 +168,9 @@ void makeGeometryScene() // Debug of surface fill in RPhi (index buffer screwed). // b1->SetNSegments(3); b1->SetNSegments(40); + + // an example of axis guides + eveMng->GetDefaultViewer()->SetAxesType(REX::REveViewer::EAxesType::kAxesOrigin); } diff --git a/tutorials/graphics/AtlasExample.C b/tutorials/graphics/AtlasExample.C deleted file mode 100644 index b603c525731dc..0000000000000 --- a/tutorials/graphics/AtlasExample.C +++ /dev/null @@ -1,225 +0,0 @@ -/// \file -/// \ingroup tutorial_graphics -/// \notebook -js -/// Show how ATLAS Style looks like. It is based on a style file from BaBar. -/// -/// \macro_image -/// \macro_code -/// -/// \author M.Sutton - -const Int_t GMAX=864; - -const int nren=3; -static const double mur[nren] = {1.0,0.25,4.0}; -static const double muf[nren] = {1.0,0.25,4.0}; -const unsigned int NUMPDF=41; - -TGraphErrors* GetGraph(Int_t ir, Int_t ifs,Int_t icut, Int_t ipdf); -void AddtoBand(TGraphErrors* g1, TGraphAsymmErrors* g2); -TGraphAsymmErrors* MakeBand(TGraphErrors* g0, TGraphErrors* g1,TGraphErrors* g2); - -void AtlasExample() -{ - gROOT->SetStyle("ATLAS"); - - Int_t icol1=5; - Int_t icol2=5; - - auto canvas = new TCanvas("canvas","single inclusive jets",50,50,600,600); - canvas->SetLogy(); - - Double_t ymin = 1.e-3; Double_t ymax = 2e7; - Double_t xmin = 60.00; Double_t xmax = 3500.; - auto frame = canvas->DrawFrame(xmin,ymin,xmax,ymax); - frame->SetYTitle("d#sigma_{jet}/dE_{T,jet} [fb/GeV]"); - frame->SetXTitle("E_{T,jet} [GeV]"); - frame->GetYaxis()->SetTitleOffset(1.4); - frame->GetXaxis()->SetTitleOffset(1.4); - - const Int_t ncut=1; - TGraphErrors *data[ncut]; - - for (Int_t icut=0; icutSetLineColor(1); - g1[ir][icut]->SetMarkerStyle(0); - } - - char daname[100]; - sprintf(daname,"data_%d",icut); - data[icut] = (TGraphErrors*)g1[0][icut]->Clone(daname); - data[icut]->SetMarkerStyle(20); - data[icut]->SetMarkerColor(1); - - // Just invent some data - for (Int_t i=0; i< data[icut]->GetN(); i++) { - Double_t x1,y1,e,dx1=0.; - data[icut]->GetPoint(i,x1,y1); - Double_t r1 = 0.4*(gRandom->Rndm(1)+2); - Double_t r2 = 0.4*(gRandom->Rndm(1)+2); - Double_t y; - if (icut==0) y = r1*y1+r1*r2*r2*x1/50000.; - else y = r1*y1; - e = sqrt(y*1000)/200; - data[icut]->SetPoint(i, x1,y); - data[icut]->SetPointError(i,dx1,e); - } - - TGraphAsymmErrors* scale[ncut]; - TGraphAsymmErrors* scalepdf[ncut]; - - scale[icut] = MakeBand(g1[0][icut],g1[1][icut],g1[2][icut]); - 
scalepdf[icut]=(TGraphAsymmErrors* ) scale[icut]->Clone("scalepdf"); - - TGraphErrors *gpdf[NUMPDF][ncut]; - for (Int_t ipdf=0; ipdfSetLineColor(2); - gpdf[ipdf][icut]->SetLineStyle(1); - gpdf[ipdf][icut]->SetMarkerStyle(0); - AddtoBand(gpdf[ipdf][icut],scalepdf[icut]); - } - - scalepdf[icut]->SetFillColor(icol2); - scalepdf[icut]->Draw("zE2"); - scalepdf[icut]->SetLineWidth(3); - scale[icut]->SetFillColor(icol1); - scale[icut]->Draw("zE2"); - g1[0][icut]->SetLineWidth(3); - g1[0][icut]->Draw("z"); - data[icut]->Draw("P"); - } - - TLatex t; t.SetNDC(); - t.DrawLatex(0.3, 0.85, "#sqrt{s}= 14 TeV"); - t.DrawLatex(0.57, 0.85, "|#eta_{jet}|<0.5"); - - auto l = new TLegend(0.45,0.65,0.8,0.8,"","NDC"); - l->SetBorderSize(0.); - l->SetTextFont(42); - l->AddEntry("data_0", "Data 2009", "ep"); - l->AddEntry("scalepdf", "NLO QCD", "lf"); - l->Draw(); -} - -TGraphErrors* GetGraph(Int_t ir, Int_t ifs,Int_t icut, Int_t ipdf) -{ - const char *cuts[5] = - {"0.0 <= |eta| < 0.5", - "0.5 <= |eta| < 1.0", - "1.0 <= |eta| < 1.5", - "1.5 <= |eta| < 2.0", - "2.0 <= |eta| < 3.0"}; - - const double mur[] = {1.0,0.25,4.0}; - const double muf[] = {1.0,0.25,4.0}; - - TFile::SetCacheFileDir("."); - TFile *file = TFile::Open("http://root.cern/files/AtlasGraphs.root", "CACHEREAD"); - - char gname[100]; - char tname[100]; - - if (ipdf>=0) - sprintf(tname," E_T (mu_r=%g, mu_f=%g);%s Pdf: %d",mur[ir],muf[ifs],cuts[icut],ipdf); - else - sprintf(tname," E_T %s Ms= %d",cuts[icut],-ipdf); - - TGraphErrors* g1 = 0; - - for (int i=1; i<=GMAX; i++) { - sprintf(gname,"full_%d",i); - g1 = (TGraphErrors*) file->Get(gname); - if (!g1) { - cout << gname << " not found " << endl; - return nullptr; - } - - const char *title = g1->GetTitle(); - - if (strcmp(title,tname)==0) break; - g1 = 0; - } - - if (!g1) return nullptr; - return g1; -} - -TGraphAsymmErrors* MakeBand(TGraphErrors* g0, TGraphErrors* g1,TGraphErrors* g2) { - - TGraphAsymmErrors* g3 = new TGraphAsymmErrors(); - - Double_t x1 = 0., y1 = 0., x2 = 0., y2 = 0., y0 = 0, x3 = 0.; - Double_t dum; - for (Int_t i=0; iGetN(); i++) { - g0->GetPoint(i, x1, y0); - g1->GetPoint(i, x1, y1); - g2->GetPoint(i, x1, y2); - - if (i==g1->GetN()-1) x2=x1; - else g2->GetPoint(i+1,x2,dum); - - if (i==0) x3=x1; - else g2->GetPoint(i-1,x3,dum); - - Double_t tmp = y2; - if (y1 < y2) { - y2 = y1; - y1 = tmp; - } - g3->SetPoint(i,x1,y0); - - Double_t binwl = (x1-x3)/2.; - Double_t binwh = (x2-x1)/2.; - if (binwl == 0.) binwl = binwh; - if (binwh == 0.) 
binwh = binwl; - g3->SetPointError(i, binwl, binwh, y0-y2, y1-y0); - - } - return g3; -} - -void AddtoBand(TGraphErrors* g1, TGraphAsymmErrors* g2) { - - Double_t x1=0., y1=0., y2=0., y0=0; - - if (g1->GetN()!=g2->GetN()) - cout << " graphs don't have the same number of elements " << endl; - - Double_t* EYhigh = g2-> GetEYhigh(); - Double_t* EYlow = g2-> GetEYlow(); - - for (Int_t i=0; iGetN(); i++) { - g1->GetPoint(i, x1, y1); - g2->GetPoint(i, x1, y2); - - if ( y1==0 || y2==0 ) - cerr << "check these points very carefully : AddtoBand() : point " << i << endl; - - Double_t eyh=0., eyl=0.; - - y0 = y1-y2; - if (y0 != 0) { - if (y0 > 0) { - eyh = EYhigh[i]; - eyh = sqrt(eyh*eyh+y0*y0); - g2->SetPointEYhigh(i, eyh); - } else { - eyl = EYlow[i]; - eyl = sqrt(eyl*eyl+y0*y0); - g2->SetPointEYlow (i, eyl); - } - } - } -} diff --git a/tutorials/hist/hist039_TH2Poly_usa.py b/tutorials/hist/hist039_TH2Poly_usa.py new file mode 100644 index 0000000000000..db487e74e47af --- /dev/null +++ b/tutorials/hist/hist039_TH2Poly_usa.py @@ -0,0 +1,68 @@ +# \file +# \ingroup tutorial_hist +# \notebook -js +# \preview This tutorial illustrates how to create an histogram with polygonal +# bins (TH2Poly), fill it and draw it using the `col` option. The initial data +# are stored in TMultiGraphs. They represent the USA map. Such histograms can +# be rendered in 3D using the option `legogl`. +# +# The initial data have been downloaded from: http://www.maproom.psu.edu/dcw/ +# This database was developed in 1991/1992 and national boundaries reflect +# political reality as of that time. +# +# \macro_code +# \macro_image +# +# \date February 2024 +# \author Olivier Couet + +import ROOT +import os.path +import urllib.request + +states = ["alabama", "arizona", "arkansas", "california", "colorado", "connecticut", "delaware", + "florida", "georgia", "idaho", "illinois", "indiana", "iowa", "kansas", + "kentucky", "louisiana", "maine", "maryland", "massachusetts", "michigan", "minnesota", + "mississippi", "missouri", "montana", "nebraska", "nevada", "new_hampshire", "new_jersey", + "new_mexico", "new_york", "north_carolina", "north_dakota", "ohio", "oklahoma", "oregon", + "pennsylvania", "rhode_island", "south_carolina", "south_dakota", "tennessee", "texas", "utah", + "vermont", "virginia", "washington", "west_virginia", "wisconsin", "wyoming"] + +pops = [4708708, 6595778, 2889450, 36961664, 5024748, 3518288, 885122, 18537969, 9829211, 1545801, + 12910409, 6423113, 3007856, 2818747, 4314113, 4492076, 1318301, 5699478, 6593587, 9969727, + 5266214, 2951996, 5987580, 974989, 1796619, 2643085, 1324575, 8707739, 2009671, 19541453, + 9380884, 646844, 11542645, 3687050, 3825657, 12604767, 1053209, 4561242, 812383, 6296254, + 24782302, 2784572, 621760, 7882590, 6664195, 1819777, 5654774, 544270] + +usa = ROOT.TCanvas("USA", "USA") +usa.ToggleEventStatus() +lon1 = -130 +lon2 = -65 +lat1 = 24 +lat2 = 50 +p = ROOT.TH2Poly("Lower48", "Lower 48 Population (2009);Latitude;Longitude", lon1, lon2, lat1, lat2) + +fileName = "usa.root" +fileNameUrl = "http://root.cern/files/usa.root" +if not os.path.isfile(fileName): + urllib.request.urlretrieve(fileNameUrl, fileName) +f = ROOT.TFile.Open(fileName) + +# Define the TH2Poly bins. 
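# The usa.root file stores the state outlines as TMultiGraphs; every
# TMultiGraph found below is added as one polygonal bin of the TH2Poly.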
+mg = ROOT.TMultiGraph +for key in f.GetListOfKeys(): + obj = key.ReadObj() + if (obj.InheritsFrom("TMultiGraph")): + p.AddBin(obj) + +# Fill TH2Poly, with capital letters for the states names +for state, pop in zip (states, pops): + p.Fill(state, pop) + +ROOT.gStyle.SetOptStat(0) +p.Draw("colz textn") + +# Add the reference for the population +pupulationRef = ROOT.TLatex(-128, 27, "#scale[.55]{#splitline{Source:}{http://eadiv.state.wy.us/pop/st-09est.htm}}") +pupulationRef.DrawClone() + diff --git a/tutorials/hist/th2polyUSA.C b/tutorials/hist/th2polyUSA.C index 5c089ee1820bf..234c7faf68d1a 100644 --- a/tutorials/hist/th2polyUSA.C +++ b/tutorials/hist/th2polyUSA.C @@ -1,7 +1,7 @@ /// \file /// \ingroup tutorial_hist /// \notebook -js -/// This tutorial illustrates how to create an histogram with polygonal +/// \preview This tutorial illustrates how to create an histogram with polygonal /// bins (TH2Poly), fill it and draw it using the `col` option. The initial data /// are stored in TMultiGraphs. They represent the USA map. Such histograms can /// be rendered in 3D using the option `legogl`. @@ -19,42 +19,37 @@ void th2polyUSA() { Int_t i, bin; const Int_t nx = 48; - const char *states [nx] = { - "alabama", "arizona", "arkansas", "california", - "colorado", "connecticut", "delaware", "florida", - "georgia", "idaho", "illinois", "indiana", - "iowa", "kansas", "kentucky", "louisiana", - "maine", "maryland", "massachusetts", "michigan", - "minnesota", "mississippi", "missouri", "montana", - "nebraska", "nevada", "new_hampshire", "new_jersey", - "new_mexico", "new_york", "north_carolina", "north_dakota", - "ohio", "oklahoma", "oregon", "pennsylvania", - "rhode_island", "south_carolina", "south_dakota", "tennessee", - "texas", "utah", "vermont", "virginia", - "washington", "west_virginia", "wisconsin", "wyoming" - }; - Double_t pop[nx] = { - 4708708, 6595778, 2889450, 36961664, 5024748, 3518288, 885122, 18537969, - 9829211, 1545801, 12910409, 6423113, 3007856, 2818747, 4314113, 4492076, - 1318301, 5699478, 6593587, 9969727, 5266214, 2951996, 5987580, 974989, - 1796619, 2643085, 1324575, 8707739, 2009671, 19541453, 9380884, 646844, - 11542645, 3687050, 3825657, 12604767, 1053209, 4561242, 812383, 6296254, - 24782302, 2784572, 621760, 7882590, 6664195, 1819777, 5654774, 544270 - }; + const char *states[nx] = { + "alabama", "arizona", "arkansas", "california", "colorado", "connecticut", "delaware", + "florida", "georgia", "idaho", "illinois", "indiana", "iowa", "kansas", + "kentucky", "louisiana", "maine", "maryland", "massachusetts", "michigan", "minnesota", + "mississippi", "missouri", "montana", "nebraska", "nevada", "new_hampshire", "new_jersey", + "new_mexico", "new_york", "north_carolina", "north_dakota", "ohio", "oklahoma", "oregon", + "pennsylvania", "rhode_island", "south_carolina", "south_dakota", "tennessee", "texas", "utah", + "vermont", "virginia", "washington", "west_virginia", "wisconsin", "wyoming"}; + Double_t pop[nx] = {4708708, 6595778, 2889450, 36961664, 5024748, 3518288, 885122, 18537969, 9829211, 1545801, + 12910409, 6423113, 3007856, 2818747, 4314113, 4492076, 1318301, 5699478, 6593587, 9969727, + 5266214, 2951996, 5987580, 974989, 1796619, 2643085, 1324575, 8707739, 2009671, 19541453, + 9380884, 646844, 11542645, 3687050, 3825657, 12604767, 1053209, 4561242, 812383, 6296254, + 24782302, 2784572, 621760, 7882590, 6664195, 1819777, 5654774, 544270}; - TCanvas *usa = new TCanvas("USA", "USA"); + auto usa = new TCanvas("USA", "USA"); usa->ToggleEventStatus(); Double_t lon1 = 
-130; Double_t lon2 = -65; Double_t lat1 = 24; Double_t lat2 = 50; - TH2Poly *p = new TH2Poly("Lower48","Lower 48 Population (2009);Latitude;Longitude",lon1,lon2,lat1,lat2); + auto p = new TH2Poly("Lower48", "Lower 48 Population (2009);Latitude;Longitude", lon1, lon2, lat1, lat2); - TFile::SetCacheFileDir("."); - TFile *f = TFile::Open("http://root.cern/files/usa.root", "CACHEREAD"); + const auto fileName = "usa.root"; + const auto fileNameUrl = "http://root.cern/files/usa.root"; + if(gSystem->AccessPathName(fileName)) + TFile::Cp(fileNameUrl, fileName); + + auto f = TFile::Open(fileName); if (!f) { - printf("Cannot access usa.root. Is internet working ?\n"); + printf("Cannot access %s. Is internet working ?\n", fileName); return; } @@ -62,16 +57,17 @@ void th2polyUSA() TMultiGraph *mg; TKey *key; TIter nextkey(gDirectory->GetListOfKeys()); - while ((key = (TKey*)nextkey())) { + while ((key = (TKey *)nextkey())) { TObject *obj = key->ReadObj(); if (obj->InheritsFrom("TMultiGraph")) { - mg = (TMultiGraph*)obj; + mg = (TMultiGraph *)obj; bin = p->AddBin(mg); } } // Fill TH2Poly, with capital letters for the states names - for (i=0; iFill(states[i], pop[i]); + for (i = 0; i < nx; i++) + p->Fill(states[i], pop[i]); gStyle->SetOptStat(0); p->Draw("colz textn"); diff --git a/tutorials/io/mergeFiles.py b/tutorials/io/mergeFiles.py new file mode 100644 index 0000000000000..e20b84bd57c43 --- /dev/null +++ b/tutorials/io/mergeFiles.py @@ -0,0 +1,70 @@ +# \file +# \ingroup tutorial_io +# \notebook -nodraw +# Illustrates how to merge two files using TFileMerger from Python. +# \author Giacomo Parolini + +import ROOT +import os +import random + +# abridged from hsimple.py +def CreateInputFile(fname): + with ROOT.TFile.Open( fname, "RECREATE", "Demo ROOT file with histograms" ) as hfile: + # Create some histograms, a profile histogram and an ntuple + hpx = ROOT.TH1F( "hpx", "This is the px distribution", 100, -4, 4 ) + hpxpy = ROOT.TH2F( "hpxpy", "py vs px", 40, -4, 4, 40, -4, 4 ) + hprof = ROOT.TProfile( "hprof", "Profile of pz versus px", 100, -4, 4, 0, 20 ) + ntuple = ROOT.TNtuple( "ntuple", "Demo ntuple", "px:py:pz:random:i" ) + + # Fill histograms randomly. + for i in range( 2000 ): + px = random.randrange(0, 1) + py = random.randrange(0, 1) + pz = px*px + py*py + r = random.randrange(0, 1) + + # Fill histograms. + hpx.Fill( px ) + hpxpy.Fill( px, py ) + hprof.Fill( px, pz ) + ntuple.Fill( px, py, pz, r, i ) + hfile.Write() + + +def MergeFiles(files_to_cleanup, nfiles): + # NOTE: when the TFileMerger is used in a `with` statement, it will automatically + # close its output file when going out of scope. + with ROOT.TFileMerger(False) as fm: + fm.OutputFile("merged.root") + files_to_cleanup.append(fm.GetOutputFile().GetName()) + for i in range(0, nfiles): + fm.AddFile(f"tomerge{i}.root") + + # New merging flags must be bitwise OR-ed on top of the default ones. + # Here, as an example, we are doing an incremental merging, meaning we want to merge the new + # files with the current content of the output file. 
+ # See TFileMerger docs for all the flags available: + # https://root.cern/doc/master/classTFileMerger.html#a8ea43dc0722ce413c7332584d8c3ef0f + mode = ROOT.TFileMerger.kAll | ROOT.TFileMerger.kIncremental + fm.PartialMerge(mode) + fm.Reset() + + +if __name__ == '__main__': + nfiles = 2 + files_to_cleanup = [] + try: + # Create the files to be merged + for i in range(0, nfiles): + fname = f"tomerge{i}.root" + CreateInputFile(fname) + files_to_cleanup.append(fname) + + MergeFiles(files_to_cleanup, nfiles) + + finally: + # Cleanup initial files + for filename in files_to_cleanup: + os.remove(filename) + diff --git a/tutorials/hist/th2polyEurope.C b/tutorials/legacy/th2polyEurope.C similarity index 100% rename from tutorials/hist/th2polyEurope.C rename to tutorials/legacy/th2polyEurope.C diff --git a/tutorials/multicore/imt001_parBranchProcessing.C b/tutorials/multicore/imt001_parBranchProcessing.C deleted file mode 100644 index ea185b9c06c48..0000000000000 --- a/tutorials/multicore/imt001_parBranchProcessing.C +++ /dev/null @@ -1,58 +0,0 @@ -/// \file -/// \ingroup tutorial_multicore -/// \notebook -/// Demonstrate how to activate and use the implicit parallelisation of TTree::GetEntry. -/// Such parallelisation creates one task per top-level branch of the tree being read. -/// In this example, most of the branches are floating point numbers, which are very fast to read. -/// This parallelisation can be used, though, on bigger trees with many (complex) branches, which -/// are more likely to benefit from speedup gains. -/// -/// \macro_code -/// -/// \date 26/09/2016 -/// \author Enric Tejedor - -int imt001_parBranchProcessing() -{ - // First enable implicit multi-threading globally, so that the implicit parallelisation is on. - // The parameter of the call specifies the number of threads to use. - int nthreads = 4; - ROOT::EnableImplicitMT(nthreads); - - // Open the file containing the tree - auto file = TFile::Open("http://root.cern/files/h1/dstarmb.root"); - - // Get the tree - auto tree = file->Get("h42"); - - const auto nEntries = tree->GetEntries(); - - // Read the branches in parallel. - // Note that the interface does not change, the parallelisation is internal - for (auto i : ROOT::TSeqUL(nEntries)) { - tree->GetEntry(i); // parallel read - } - - // IMT parallelisation can be disabled for a specific tree - tree->SetImplicitMT(false); - - // If now GetEntry is invoked on the tree, the reading is sequential - for (auto i : ROOT::TSeqUL(nEntries)) { - tree->GetEntry(i); // sequential read - } - - // Parallel reading can be re-enabled - tree->SetImplicitMT(true); - - // IMT can be also disabled globally. 
- // As a result, no tree will run GetEntry in parallel - ROOT::DisableImplicitMT(); - - // This is still sequential: the global flag is disabled, even if the - // flag for this particular tree is enabled - for (auto i : ROOT::TSeqUL(nEntries)) { - tree->GetEntry(i); // sequential read - } - - return 0; -} diff --git a/tutorials/multicore/imt101_parTreeProcessing.C b/tutorials/multicore/imt101_parTreeProcessing.C index be9f5ada8a7f3..46da97e1d37e3 100644 --- a/tutorials/multicore/imt101_parTreeProcessing.C +++ b/tutorials/multicore/imt101_parTreeProcessing.C @@ -29,7 +29,7 @@ int imt101_parTreeProcessing() ROOT::TThreadedObject pxpyHist("px_py", "p_{X} vs p_{Y} Distribution;p_{X};p_{Y}", 100, -5., 5., 100, -5., 5.); // Create a TTreeProcessorMT: specify the file and the tree in it - ROOT::TTreeProcessorMT tp("http://root.cern/files/tp_process_imt.root", "events"); + ROOT::TTreeProcessorMT tp("root://eospublic.cern.ch//eos/root-eos/testfiles/tp_process_imt.root", "events"); // Define the function that will process a subrange of the tree. // The function must receive only one parameter, a TTreeReader, diff --git a/tutorials/multicore/mp103_processSelector.C b/tutorials/multicore/mp103_processSelector.C index 74e26d97e3a73..69fd366570767 100644 --- a/tutorials/multicore/mp103_processSelector.C +++ b/tutorials/multicore/mp103_processSelector.C @@ -19,11 +19,11 @@ #include -const auto file0 = "http://root.cern/files/h1/dstarmb.root"; +const auto file0 = "root://eospublic.cern.ch//eos/root-eos/h1/dstarmb.root"; const std::vector files = {file0, - "http://root.cern/files/h1/dstarp1a.root", - "http://root.cern/files/h1/dstarp1b.root", - "http://root.cern/files/h1/dstarp2.root"}; + "root://eospublic.cern.ch//eos/root-eos/h1/dstarp1a.root", + "root://eospublic.cern.ch//eos/root-eos/h1/dstarp1b.root", + "root://eospublic.cern.ch//eos/root-eos/h1/dstarp2.root"}; int mp103_processSelector() { diff --git a/tutorials/multicore/mp104_processH1.C b/tutorials/multicore/mp104_processH1.C index 066c68e9bf729..d4b7a0f7cab3c 100644 --- a/tutorials/multicore/mp104_processH1.C +++ b/tutorials/multicore/mp104_processH1.C @@ -31,10 +31,10 @@ static std::string tutname = "mp104_processH1: "; static std::string logfile = "mp104_processH1.log"; static RedirectHandle_t gRH; -std::vector files {"http://root.cern/files/h1/dstarmb.root", - "http://root.cern/files/h1/dstarp1a.root", - "http://root.cern/files/h1/dstarp1b.root", - "http://root.cern/files/h1/dstarp2.root"}; +std::vector files {"root://eospublic.cern.ch//eos/root-eos/h1/dstarmb.root", + "root://eospublic.cern.ch//eos/root-eos/h1/dstarp1a.root", + "root://eospublic.cern.ch//eos/root-eos/h1/dstarp1b.root", + "root://eospublic.cern.ch//eos/root-eos/h1/dstarp2.root"}; int mp104_processH1() { diff --git a/tutorials/net/parallelMergeClient.C b/tutorials/net/parallelMergeClient.C index 9527f198876a3..f0b41f8e46e5c 100644 --- a/tutorials/net/parallelMergeClient.C +++ b/tutorials/net/parallelMergeClient.C @@ -6,8 +6,8 @@ /// To run this demo do the following: /// - Open at least 2 windows /// - Start ROOT in the first windows -/// - Execute in the first window: .x fastMergeServer.C -/// - Execute in the other windows: root.exe -b -l -q .x treeClient.C +/// - Execute in the first window: .x parallelMergeServer.C +/// - Execute in the other windows: root.exe -b -l -q .x 'parallelMergeClient.C("")' /// (You can put it in the background if wanted). 
/// If you want to run the hserv.C on a different host, just change /// "localhost" in the TSocket ctor below to the desired hostname. @@ -25,15 +25,18 @@ #include "TRandom.h" #include "TError.h" -void parallelMergeClient() +#include + +void parallelMergeClient(const std::string &socketPath) { gBenchmark->Start("treeClient"); - TParallelMergingFile *file = (TParallelMergingFile*)TFile::Open("mergedClient.root?pmerge=localhost:1095","RECREATE"); + TParallelMergingFile *file = + (TParallelMergingFile *)TFile::Open((std::string("mergedClient.root?pmerge=") + socketPath).c_str(), "RECREATE"); file->Write(); file->UploadAndReset(); // We do this early to get assigned an index. - UInt_t idx = file->fServerIdx; // This works on in ACLiC. + UInt_t idx = file->GetServerIdx(); TH1 *hpx; if (idx%2 == 0) { diff --git a/tutorials/net/parallelMergeServer.C b/tutorials/net/parallelMergeServer.C index cb0a9c270fb05..702841d827a7d 100644 --- a/tutorials/net/parallelMergeServer.C +++ b/tutorials/net/parallelMergeServer.C @@ -14,8 +14,8 @@ /// To run this demo do the following: /// - Open three windows /// - Start ROOT in all three windows -/// - Execute in the first window: .x hserv2.C -/// - Execute in the second and third windows: .x hclient.C +/// - Execute in the first window: .x parallelMergeServer.C +/// - Execute in the second and third windows: .x parallelMergeClient.C("") /// /// \macro_code /// @@ -319,10 +319,23 @@ struct ParallelFileMerger : public TObject }; void parallelMergeServer(bool cache = false) { - // Open a server socket looking for connections on a named service or - // on a specified port. - //TServerSocket *ss = new TServerSocket("rootserv", kTRUE); - TServerSocket *ss = new TServerSocket(1095, kTRUE, 100); + // Open a server socket looking for connections on a named service + TString socketPath = "rootserv."; // prefix for temporary file in the temp folder + // Get a unique, temporary file name for the socket. We remove and close the file + // immediatly in order to reopen it as a socket. There is a race here: between + // the removal and the creation of the socket, the file could have been recreated. + // But it is unlikely (due to the random letters in the name) and harmless: the socket + // cannot be created in this case. + FILE *dummy = gSystem->TempFileName(socketPath); + if (!dummy) { + Error("fastMergeServer", "Cannot create temporary file for socket\n"); + return; + } + + std::string strSocketPath(socketPath.View()); + remove(strSocketPath.c_str()); + fclose(dummy); + TServerSocket *ss = new TServerSocket(socketPath); if (!ss->IsValid()) { return; } @@ -343,7 +356,7 @@ void parallelMergeServer(bool cache = false) { kProtocolVersion = 1 }; - printf("fastMergeServerHist ready to accept connections\n"); + printf("fastMergeServerHist ready to accept connections on %s\n", strSocketPath.c_str()); while (true) { TMessage *mess; TSocket *s; @@ -438,5 +451,6 @@ void parallelMergeServer(bool cache = false) { mergers.Delete(); delete mon; + remove(strSocketPath.c_str()); delete ss; } diff --git a/tutorials/pyroot/pyroot005_tfile_context_manager.py b/tutorials/pyroot/pyroot005_tfile_context_manager.py index 26e3e7b7746b9..5760a6bc8128b 100644 --- a/tutorials/pyroot/pyroot005_tfile_context_manager.py +++ b/tutorials/pyroot/pyroot005_tfile_context_manager.py @@ -42,10 +42,11 @@ # When the TFile.Close method is called, the current directory is automatically # set again to ROOT.gROOT. 
Objects that were attached to the file inside the -# context are automatically deleted and made 'None' when the file is closed. +# context are automatically deleted, so they can't be accessed anymore after +# the file is closed. print("Status after the first TFile context manager:") print(" Current directory: '{}'.".format(ROOT.gDirectory.GetName())) -print(" Accessing 'histo_2' gives: '{}'.\n".format(histo_2)) +# print(histo_2) # the object is deleted at this point, so don't use it anymore! # Also reading data from a TFile can be done in a context manager. Information # stored in the objects of the file can be queried and used inside the context. diff --git a/tutorials/quadp/portfolio.C b/tutorials/quadp/portfolio.C index 7ed61f018e84b..c7c0826ff900d 100644 --- a/tutorials/quadp/portfolio.C +++ b/tutorials/quadp/portfolio.C @@ -297,6 +297,7 @@ void portfolio() printf("accessing %s file from http://root.cern/files\n",fname); f = TFile::Open(Form("http://root.cern/files/%s",fname)); } + if (!f) return; TArrayF *data = new TArrayF[nrStocks]; diff --git a/tutorials/roofit/rf615_simulation_based_inference.py b/tutorials/roofit/rf615_simulation_based_inference.py index a0359e64ee56f..50ab4c0ce0285 100644 --- a/tutorials/roofit/rf615_simulation_based_inference.py +++ b/tutorials/roofit/rf615_simulation_based_inference.py @@ -271,8 +271,3 @@ def learned_likelihood_ratio(x, mu): del nllr_learned del nll_gauss del workspace - -import sys - -# Hack to bypass ClearProxiedObjects() -del sys.modules["libROOTPythonizations"] diff --git a/tutorials/roofit/rf617_simulation_based_inference_multidimensional.py b/tutorials/roofit/rf617_simulation_based_inference_multidimensional.py index 0326f336b2298..b358b699e9689 100644 --- a/tutorials/roofit/rf617_simulation_based_inference_multidimensional.py +++ b/tutorials/roofit/rf617_simulation_based_inference_multidimensional.py @@ -328,8 +328,3 @@ def learned_likelihood_ratio(*args): minimizer.minimize("Minuit2") result = minimizer.save() result.Print() - -import sys - -# Hack to bypass ClearProxiedObjects() -del sys.modules["libROOTPythonizations"] diff --git a/tutorials/roofit/rf618_mixture_models.py b/tutorials/roofit/rf618_mixture_models.py index ea0c2c3ef65df..6d9c1985d079a 100644 --- a/tutorials/roofit/rf618_mixture_models.py +++ b/tutorials/roofit/rf618_mixture_models.py @@ -205,8 +205,3 @@ def likelihood_ratio(llr: np.ndarray, mu: np.ndarray) -> np.ndarray: del n_pred del llh del nll_ratio - -import sys - -# Hack to bypass ClearProxiedObjects() -del sys.modules["libROOTPythonizations"] diff --git a/tutorials/roofit/rf903_numintcache.C b/tutorials/roofit/rf903_numintcache.C index 31a548448a6e7..823b0158d49b5 100644 --- a/tutorials/roofit/rf903_numintcache.C +++ b/tutorials/roofit/rf903_numintcache.C @@ -93,7 +93,7 @@ RooWorkspace *getWorkspace(Int_t mode) if (mode != 2) { // Create empty workspace workspace - w = new RooWorkspace("w", 1); + w = new RooWorkspace("w"); // Make a difficult to normalize pdf in 3 dimensions that is integrated numerically. 
w->factory("EXPR::model('1/((x-a)*(x-a)+0.01)+1/((y-a)*(y-a)+0.01)+1/" diff --git a/tutorials/roofit/rf903_numintcache.py b/tutorials/roofit/rf903_numintcache.py index 397bc20f49c03..f43946ff8109e 100644 --- a/tutorials/roofit/rf903_numintcache.py +++ b/tutorials/roofit/rf903_numintcache.py @@ -26,7 +26,7 @@ def getWorkspace(mode): if mode != 2: # Create empty workspace workspace - w = ROOT.RooWorkspace("w", 1) + w = ROOT.RooWorkspace("w") # Make a difficult to normalize pdf in 3 dimensions that is # integrated numerically. diff --git a/tutorials/roostats/TwoSidedFrequentistUpperLimitWithBands.py b/tutorials/roostats/TwoSidedFrequentistUpperLimitWithBands.py index 0d90077cc2134..9e9495a9993f1 100644 --- a/tutorials/roostats/TwoSidedFrequentistUpperLimitWithBands.py +++ b/tutorials/roostats/TwoSidedFrequentistUpperLimitWithBands.py @@ -225,6 +225,7 @@ # make a histogram of parameter vs. threshold histOfThresholds = ROOT.TH1F("histOfThresholds", "", parameterScan.numEntries(), firstPOI.getMin(), firstPOI.getMax()) +histOfThresholds.SetDirectory(ROOT.nullptr) # so th histogram doesn't get attached to the file with the workspace histOfThresholds.GetXaxis().SetTitle(firstPOI.GetName()) histOfThresholds.GetYaxis().SetTitle("Threshold") @@ -271,6 +272,7 @@ # Now we generate background only and find distribution of upper limits histOfUL = ROOT.TH1F("histOfUL", "", 100, 0, firstPOI.getMax()) +histOfUL.SetDirectory(ROOT.nullptr) # make sure the histogram doesn't get attached to the file with the workspace histOfUL.GetXaxis().SetTitle("Upper Limit (background only)") histOfUL.GetYaxis().SetTitle("Entries") for imc in range(nToyMC): @@ -340,6 +342,10 @@ # for few events, data is often the same, and UL is often the same # print("thisUL = ", thisUL) +# At this point we can close the input file, since the RooWorkspace is not used +# anymore. +inputFile.Close() + histOfUL.Draw() c1.SaveAs("two-sided_upper_limit_output.pdf") @@ -356,7 +362,6 @@ # Now find bands and power constraint bins = histOfUL.GetIntegral() -cumulative = ROOT.TH1F() cumulative = histOfUL.Clone("cumulative") cumulative.SetContent(bins) band2sigDown = 0 diff --git a/tutorials/tmva/RBatchGenerator_NumPy.py b/tutorials/tmva/RBatchGenerator_NumPy.py index 7585135df55cc..59503bf160b3d 100644 --- a/tutorials/tmva/RBatchGenerator_NumPy.py +++ b/tutorials/tmva/RBatchGenerator_NumPy.py @@ -16,20 +16,22 @@ batch_size = 128 chunk_size = 5_000 -ds_train, ds_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( - tree_name, - file_name, +rdataframe = ROOT.RDataFrame(tree_name, file_name) + +gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( + rdataframe, batch_size, chunk_size, validation_split=0.3, shuffle=True, + drop_remainder=False ) # Loop through training set -for i, b in enumerate(ds_train): +for i, b in enumerate(gen_train): print(f"Training batch {i} => {b.shape}") # Loop through Validation set -for i, b in enumerate(ds_validation): +for i, b in enumerate(gen_validation): print(f"Validation batch {i} => {b.shape}") diff --git a/tutorials/tmva/RBatchGenerator_PyTorch.py b/tutorials/tmva/RBatchGenerator_PyTorch.py index 480ba9b692cb3..bcb84ae3b231c 100644 --- a/tutorials/tmva/RBatchGenerator_PyTorch.py +++ b/tutorials/tmva/RBatchGenerator_PyTorch.py @@ -17,13 +17,14 @@ batch_size = 128 chunk_size = 5_000 +rdataframe = ROOT.RDataFrame(tree_name, file_name) + target = "Type" # Returns two generators that return training and validation batches # as PyTorch tensors. 
gen_train, gen_validation = ROOT.TMVA.Experimental.CreatePyTorchGenerators( - tree_name, - file_name, + rdataframe, batch_size, chunk_size, target=target, @@ -53,31 +54,33 @@ def calc_accuracy(targets, pred): loss_fn = torch.nn.MSELoss(reduction="mean") optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9) +number_of_epochs = 2 -# Loop through the training set and train model -for i, (x_train, y_train) in enumerate(gen_train): - # Make prediction and calculate loss - pred = model(x_train).view(-1) - loss = loss_fn(pred, y_train) +for _ in range(number_of_epochs): + # Loop through the training set and train model + for i, (x_train, y_train) in enumerate(gen_train): + # Make prediction and calculate loss + pred = model(x_train) + loss = loss_fn(pred, y_train) - # improve model - model.zero_grad() - loss.backward() - optimizer.step() + # improve model + model.zero_grad() + loss.backward() + optimizer.step() - # Calculate accuracy - accuracy = calc_accuracy(y_train, pred) + # Calculate accuracy + accuracy = calc_accuracy(y_train, pred) - print(f"Training => accuracy: {accuracy}") + print(f"Training => accuracy: {accuracy}") -################################################################# -# Validation -################################################################# + ################################################################# + # Validation + ################################################################# -# Evaluate the model on the validation set -for i, (x_train, y_train) in enumerate(gen_validation): - # Make prediction and calculate accuracy - pred = model(x_train).view(-1) - accuracy = calc_accuracy(y_train, pred) + # Evaluate the model on the validation set + for i, (x_train, y_train) in enumerate(gen_validation): + # Make prediction and calculate accuracy + pred = model(x_train) + accuracy = calc_accuracy(y_train, pred) - print(f"Validation => accuracy: {accuracy}") + print(f"Validation => accuracy: {accuracy}") diff --git a/tutorials/tmva/RBatchGenerator_TensorFlow.py b/tutorials/tmva/RBatchGenerator_TensorFlow.py index 4885a22e4415a..5a6995a5381d9 100644 --- a/tutorials/tmva/RBatchGenerator_TensorFlow.py +++ b/tutorials/tmva/RBatchGenerator_TensorFlow.py @@ -17,18 +17,29 @@ batch_size = 128 chunk_size = 5_000 +rdataframe = ROOT.RDataFrame(tree_name, file_name) + target = "Type" # Returns two TF.Dataset for training and validation batches. 
ds_train, ds_valid = ROOT.TMVA.Experimental.CreateTFDatasets( - tree_name, - file_name, + rdataframe, batch_size, chunk_size, validation_split=0.3, target=target, ) +num_of_epochs = 2 + +# Datasets have to be repeated as many times as there are epochs +ds_train_repeated = ds_train.repeat(num_of_epochs) +ds_valid_repeated = ds_valid.repeat(num_of_epochs) + +# Number of batches per epoch must be given for model.fit +train_batches_per_epoch = ds_train.number_of_batches +validation_batches_per_epoch = ds_valid.number_of_batches + # Get a list of the columns used for training input_columns = ds_train.train_columns num_features = len(input_columns) @@ -39,10 +50,9 @@ # Define TensorFlow model model = tf.keras.Sequential( - [ - tf.keras.layers.Dense( - 300, activation=tf.nn.tanh, input_shape=(num_features,) - ), # input shape required + [ + tf.keras.layers.Input(shape=(num_features,)), + tf.keras.layers.Dense(300, activation=tf.nn.tanh), tf.keras.layers.Dense(300, activation=tf.nn.tanh), tf.keras.layers.Dense(300, activation=tf.nn.tanh), tf.keras.layers.Dense(1, activation=tf.nn.sigmoid), @@ -52,4 +62,5 @@ model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"]) # Train model -model.fit(ds_train, validation_data=ds_valid, epochs=2) +model.fit(ds_train_repeated, steps_per_epoch=train_batches_per_epoch, validation_data=ds_valid_repeated,\ + validation_steps=validation_batches_per_epoch, epochs=num_of_epochs) diff --git a/tutorials/tmva/RBatchGenerator_filters_vectors.py b/tutorials/tmva/RBatchGenerator_filters_vectors.py index 58aef0642b33f..edd3d12551113 100644 --- a/tutorials/tmva/RBatchGenerator_filters_vectors.py +++ b/tutorials/tmva/RBatchGenerator_filters_vectors.py @@ -15,17 +15,20 @@ chunk_size = 50 # Defines the size of the chunks batch_size = 5 # Defines the size of the returned batches -# Define filters as strings -filters = ["f1 > 30", "f2 < 70", "f3 == true"] +rdataframe = ROOT.RDataFrame(tree_name, file_name) + +# Define filters, filters must be named +filteredrdf = rdataframe.Filter("f1 > 30", "first_filter")\ + .Filter("f2 < 70", "second_filter")\ + .Filter("f3==true", "third_filter") + max_vec_sizes = {"f4": 3, "f5": 2, "f6": 1} ds_train, ds_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators( - tree_name, - file_name, + filteredrdf, batch_size, chunk_size, validation_split=0.3, - filters=filters, max_vec_sizes=max_vec_sizes, shuffle=True, ) diff --git a/tutorials/tmva/keras/ClassificationKeras.py b/tutorials/tmva/keras/ClassificationKeras.py index dc53228d40dfc..50f92bf19fa9e 100755 --- a/tutorials/tmva/keras/ClassificationKeras.py +++ b/tutorials/tmva/keras/ClassificationKeras.py @@ -1,70 +1,81 @@ #!/usr/bin/env python -## \file -## \ingroup tutorial_tmva_keras -## \notebook -nodraw -## This tutorial shows how to do classification in TMVA with neural networks -## trained with keras. -## -## \macro_code -## -## \date 2017 -## \author TMVA Team - -from ROOT import TMVA, TFile, TTree, TCut +# \file +# \ingroup tutorial_tmva_keras +# \notebook -nodraw +# This tutorial shows how to do classification in TMVA with neural networks +# trained with keras. 
+# +# \macro_code +# +# \date 2017 +# \author TMVA Team + +from ROOT import TMVA, TFile, TCut from subprocess import call from os.path import isfile from tensorflow.keras.models import Sequential -from tensorflow.keras.layers import Dense, Activation +from tensorflow.keras.layers import Dense from tensorflow.keras.optimizers import SGD -# Setup TMVA -TMVA.Tools.Instance() -TMVA.PyMethodBase.PyInitialize() -output = TFile.Open('TMVA_Classification_Keras.root', 'RECREATE') -factory = TMVA.Factory('TMVAClassification', output, - '!V:!Silent:Color:DrawProgressBar:Transformations=D,G:AnalysisType=Classification') +def create_model(): + # Generate model + + # Define model + model = Sequential() + model.add(Dense(64, activation='relu', input_dim=4)) + model.add(Dense(2, activation='softmax')) + + # Set loss and optimizer + model.compile(loss='categorical_crossentropy', + optimizer=SGD(learning_rate=0.01), weighted_metrics=['accuracy', ]) + + # Store model to file + model.save('modelClassification.h5') + model.summary() + + +def run(): + with TFile.Open('TMVA_Classification_Keras.root', 'RECREATE') as output, TFile.Open('tmva_class_example.root') as data: + factory = TMVA.Factory('TMVAClassification', output, + '!V:!Silent:Color:DrawProgressBar:Transformations=D,G:AnalysisType=Classification') -# Load data -if not isfile('tmva_class_example.root'): - call(['curl', '-L', '-O', 'http://root.cern/files/tmva_class_example.root']) + signal = data.Get('TreeS') + background = data.Get('TreeB') -data = TFile.Open('tmva_class_example.root') -signal = data.Get('TreeS') -background = data.Get('TreeB') + dataloader = TMVA.DataLoader('dataset') + for branch in signal.GetListOfBranches(): + dataloader.AddVariable(branch.GetName()) -dataloader = TMVA.DataLoader('dataset') -for branch in signal.GetListOfBranches(): - dataloader.AddVariable(branch.GetName()) + dataloader.AddSignalTree(signal, 1.0) + dataloader.AddBackgroundTree(background, 1.0) + dataloader.PrepareTrainingAndTestTree(TCut(''), + 'nTrain_Signal=4000:nTrain_Background=4000:SplitMode=Random:NormMode=NumEvents:!V') -dataloader.AddSignalTree(signal, 1.0) -dataloader.AddBackgroundTree(background, 1.0) -dataloader.PrepareTrainingAndTestTree(TCut(''), - 'nTrain_Signal=4000:nTrain_Background=4000:SplitMode=Random:NormMode=NumEvents:!V') + # Book methods + factory.BookMethod(dataloader, TMVA.Types.kFisher, 'Fisher', + '!H:!V:Fisher:VarTransform=D,G') + factory.BookMethod(dataloader, TMVA.Types.kPyKeras, 'PyKeras', + 'H:!V:VarTransform=D,G:FilenameModel=modelClassification.h5:FilenameTrainedModel=trainedModelClassification.h5:NumEpochs=20:BatchSize=32') -# Generate model + # Run training, test and evaluation + factory.TrainAllMethods() + factory.TestAllMethods() + factory.EvaluateAllMethods() -# Define model -model = Sequential() -model.add(Dense(64, activation='relu', input_dim=4)) -model.add(Dense(2, activation='softmax')) -# Set loss and optimizer -model.compile(loss='categorical_crossentropy', - optimizer=SGD(learning_rate=0.01), weighted_metrics=['accuracy', ]) +if __name__ == "__main__": + # Setup TMVA + TMVA.Tools.Instance() + TMVA.PyMethodBase.PyInitialize() -# Store model to file -model.save('modelClassification.h5') -model.summary() + # Create and store the ML model + create_model() -# Book methods -factory.BookMethod(dataloader, TMVA.Types.kFisher, 'Fisher', - '!H:!V:Fisher:VarTransform=D,G') -factory.BookMethod(dataloader, TMVA.Types.kPyKeras, 'PyKeras', - 
'H:!V:VarTransform=D,G:FilenameModel=modelClassification.h5:FilenameTrainedModel=trainedModelClassification.h5:NumEpochs=20:BatchSize=32') + # Load data + if not isfile('tmva_class_example.root'): + call(['curl', '-L', '-O', 'http://root.cern/files/tmva_class_example.root']) -# Run training, test and evaluation -factory.TrainAllMethods() -factory.TestAllMethods() -factory.EvaluateAllMethods() + # Run TMVA + run() diff --git a/tutorials/tmva/keras/MulticlassKeras.py b/tutorials/tmva/keras/MulticlassKeras.py index d22fadec67f0e..7018e927f60c3 100755 --- a/tutorials/tmva/keras/MulticlassKeras.py +++ b/tutorials/tmva/keras/MulticlassKeras.py @@ -1,75 +1,85 @@ #!/usr/bin/env python -## \file -## \ingroup tutorial_tmva_keras -## \notebook -nodraw -## This tutorial shows how to do multiclass classification in TMVA with neural -## networks trained with keras. -## -## \macro_code -## -## \date 2017 -## \author TMVA Team - -from ROOT import TMVA, TFile, TTree, TCut, gROOT +# \file +# \ingroup tutorial_tmva_keras +# \notebook -nodraw +# This tutorial shows how to do multiclass classification in TMVA with neural +# networks trained with keras. +# +# \macro_code +# +# \date 2017 +# \author TMVA Team + +from ROOT import TMVA, TFile, TCut, gROOT from os.path import isfile from tensorflow.keras.models import Sequential -from tensorflow.keras.layers import Dense, Activation +from tensorflow.keras.layers import Dense from tensorflow.keras.optimizers import SGD -# Setup TMVA -TMVA.Tools.Instance() -TMVA.PyMethodBase.PyInitialize() - -output = TFile.Open('TMVA.root', 'RECREATE') -factory = TMVA.Factory('TMVAClassification', output, - '!V:!Silent:Color:DrawProgressBar:Transformations=D,G:AnalysisType=multiclass') - -# Load data -if not isfile('tmva_example_multiple_background.root'): - createDataMacro = str(gROOT.GetTutorialDir()) + '/tmva/createData.C' - print(createDataMacro) - gROOT.ProcessLine('.L {}'.format(createDataMacro)) - gROOT.ProcessLine('create_MultipleBackground(4000)') - -data = TFile.Open('tmva_example_multiple_background.root') -signal = data.Get('TreeS') -background0 = data.Get('TreeB0') -background1 = data.Get('TreeB1') -background2 = data.Get('TreeB2') - -dataloader = TMVA.DataLoader('dataset') -for branch in signal.GetListOfBranches(): - dataloader.AddVariable(branch.GetName()) - -dataloader.AddTree(signal, 'Signal') -dataloader.AddTree(background0, 'Background_0') -dataloader.AddTree(background1, 'Background_1') -dataloader.AddTree(background2, 'Background_2') -dataloader.PrepareTrainingAndTestTree(TCut(''), - 'SplitMode=Random:NormMode=NumEvents:!V') - -# Generate model - -# Define model -model = Sequential() -model.add(Dense(32, activation='relu', input_dim=4)) -model.add(Dense(4, activation='softmax')) - -# Set loss and optimizer -model.compile(loss='categorical_crossentropy', optimizer=SGD(learning_rate=0.01), weighted_metrics=['accuracy',]) - -# Store model to file -model.save('modelMultiClass.h5') -model.summary() - -# Book methods -factory.BookMethod(dataloader, TMVA.Types.kFisher, 'Fisher', - '!H:!V:Fisher:VarTransform=D,G') -factory.BookMethod(dataloader, TMVA.Types.kPyKeras, 'PyKeras', - 'H:!V:VarTransform=D,G:FilenameModel=modelMultiClass.h5:FilenameTrainedModel=trainedModelMultiClass.h5:NumEpochs=20:BatchSize=32') - -# Run TMVA -factory.TrainAllMethods() -factory.TestAllMethods() -factory.EvaluateAllMethods() + +def create_model(): + # Define model + model = Sequential() + model.add(Dense(32, activation='relu', input_dim=4)) + model.add(Dense(4, activation='softmax')) + + # 
Set loss and optimizer + model.compile(loss='categorical_crossentropy', optimizer=SGD( + learning_rate=0.01), weighted_metrics=['accuracy',]) + + # Store model to file + model.save('modelMultiClass.h5') + model.summary() + + +def run(): + with TFile.Open('TMVA.root', 'RECREATE') as output, TFile.Open('tmva_example_multiple_background.root') as data: + factory = TMVA.Factory('TMVAClassification', output, + '!V:!Silent:Color:DrawProgressBar:Transformations=D,G:AnalysisType=multiclass') + + signal = data.Get('TreeS') + background0 = data.Get('TreeB0') + background1 = data.Get('TreeB1') + background2 = data.Get('TreeB2') + + dataloader = TMVA.DataLoader('dataset') + for branch in signal.GetListOfBranches(): + dataloader.AddVariable(branch.GetName()) + + dataloader.AddTree(signal, 'Signal') + dataloader.AddTree(background0, 'Background_0') + dataloader.AddTree(background1, 'Background_1') + dataloader.AddTree(background2, 'Background_2') + dataloader.PrepareTrainingAndTestTree(TCut(''), + 'SplitMode=Random:NormMode=NumEvents:!V') + + # Book methods + factory.BookMethod(dataloader, TMVA.Types.kFisher, 'Fisher', + '!H:!V:Fisher:VarTransform=D,G') + factory.BookMethod(dataloader, TMVA.Types.kPyKeras, 'PyKeras', + 'H:!V:VarTransform=D,G:FilenameModel=modelMultiClass.h5:FilenameTrainedModel=trainedModelMultiClass.h5:NumEpochs=20:BatchSize=32') + + # Run TMVA + factory.TrainAllMethods() + factory.TestAllMethods() + factory.EvaluateAllMethods() + + +if __name__ == "__main__": + # Generate model + create_model() + + # Setup TMVA + TMVA.Tools.Instance() + TMVA.PyMethodBase.PyInitialize() + + # Load data + if not isfile('tmva_example_multiple_background.root'): + createDataMacro = str(gROOT.GetTutorialDir()) + '/tmva/createData.C' + print(createDataMacro) + gROOT.ProcessLine('.L {}'.format(createDataMacro)) + gROOT.ProcessLine('create_MultipleBackground(4000)') + + # Run TMVA + run() diff --git a/tutorials/tmva/keras/RegressionKeras.py b/tutorials/tmva/keras/RegressionKeras.py index 759bc47328ecc..72e4ebdf704d9 100755 --- a/tutorials/tmva/keras/RegressionKeras.py +++ b/tutorials/tmva/keras/RegressionKeras.py @@ -1,71 +1,82 @@ #!/usr/bin/env python -## \file -## \ingroup tutorial_tmva_keras -## \notebook -nodraw -## This tutorial shows how to do regression in TMVA with neural networks -## trained with keras. -## -## \macro_code -## -## \date 2017 -## \author TMVA Team - -from ROOT import TMVA, TFile, TTree, TCut +# \file +# \ingroup tutorial_tmva_keras +# \notebook -nodraw +# This tutorial shows how to do regression in TMVA with neural networks +# trained with keras. 
+# +# \macro_code +# +# \date 2017 +# \author TMVA Team + +from ROOT import TMVA, TFile, TCut from subprocess import call from os.path import isfile from tensorflow.keras.models import Sequential -from tensorflow.keras.layers import Dense, Activation +from tensorflow.keras.layers import Dense from tensorflow.keras.optimizers import SGD -# Setup TMVA -TMVA.Tools.Instance() -TMVA.PyMethodBase.PyInitialize() - -output = TFile.Open('TMVA_Regression_Keras.root', 'RECREATE') -factory = TMVA.Factory('TMVARegression', output, - '!V:!Silent:Color:DrawProgressBar:Transformations=D,G:AnalysisType=Regression') - -# Load data -if not isfile('tmva_reg_example.root'): - call(['curl', '-L', '-O', 'http://root.cern/files/tmva_reg_example.root']) - -data = TFile.Open('tmva_reg_example.root') -tree = data.Get('TreeR') - -dataloader = TMVA.DataLoader('dataset') -for branch in tree.GetListOfBranches(): - name = branch.GetName() - if name != 'fvalue': - dataloader.AddVariable(name) -dataloader.AddTarget('fvalue') - -dataloader.AddRegressionTree(tree, 1.0) -#use only 1000 events since evaluation is very slow (especially on MacOS). Increase it to get meaningful results -dataloader.PrepareTrainingAndTestTree(TCut(''), - 'nTrain_Regression=1000:SplitMode=Random:NormMode=NumEvents:!V') - -# Generate model - -# Define model -model = Sequential() -model.add(Dense(64, activation='tanh', input_dim=2)) -model.add(Dense(1, activation='linear')) - -# Set loss and optimizer -model.compile(loss='mean_squared_error', optimizer=SGD(learning_rate=0.01), weighted_metrics=[]) - -# Store model to file -model.save('modelRegression.h5') -model.summary() - -# Book methods -factory.BookMethod(dataloader, TMVA.Types.kPyKeras, 'PyKeras', - 'H:!V:VarTransform=D,G:FilenameModel=modelRegression.h5:FilenameTrainedModel=trainedModelRegression.h5:NumEpochs=20:BatchSize=32') -factory.BookMethod(dataloader, TMVA.Types.kBDT, 'BDTG', - '!H:!V:VarTransform=D,G:NTrees=1000:BoostType=Grad:Shrinkage=0.1:UseBaggedBoost:BaggedSampleFraction=0.5:nCuts=20:MaxDepth=4') - -# Run TMVA -factory.TrainAllMethods() -factory.TestAllMethods() -factory.EvaluateAllMethods() + +def create_model(): + # Define model + model = Sequential() + model.add(Dense(64, activation='tanh', input_dim=2)) + model.add(Dense(1, activation='linear')) + + # Set loss and optimizer + model.compile(loss='mean_squared_error', optimizer=SGD( + learning_rate=0.01), weighted_metrics=[]) + + # Store model to file + model.save('modelRegression.h5') + model.summary() + + +def run(): + + with TFile.Open('TMVA_Regression_Keras.root', 'RECREATE') as output, TFile.Open('tmva_reg_example.root') as data: + factory = TMVA.Factory('TMVARegression', output, + '!V:!Silent:Color:DrawProgressBar:Transformations=D,G:AnalysisType=Regression') + + tree = data.Get('TreeR') + + dataloader = TMVA.DataLoader('dataset') + for branch in tree.GetListOfBranches(): + name = branch.GetName() + if name != 'fvalue': + dataloader.AddVariable(name) + dataloader.AddTarget('fvalue') + + dataloader.AddRegressionTree(tree, 1.0) + # use only 1000 events since evaluation is very slow (especially on MacOS). 
Increase it to get meaningful results + dataloader.PrepareTrainingAndTestTree(TCut(''), + 'nTrain_Regression=1000:SplitMode=Random:NormMode=NumEvents:!V') + + # Book methods + factory.BookMethod(dataloader, TMVA.Types.kPyKeras, 'PyKeras', + 'H:!V:VarTransform=D,G:FilenameModel=modelRegression.h5:FilenameTrainedModel=trainedModelRegression.h5:NumEpochs=20:BatchSize=32') + factory.BookMethod(dataloader, TMVA.Types.kBDT, 'BDTG', + '!H:!V:VarTransform=D,G:NTrees=1000:BoostType=Grad:Shrinkage=0.1:UseBaggedBoost:BaggedSampleFraction=0.5:nCuts=20:MaxDepth=4') + + # Run TMVA + factory.TrainAllMethods() + factory.TestAllMethods() + factory.EvaluateAllMethods() + + +if __name__ == "__main__": + # Setup TMVA + TMVA.Tools.Instance() + TMVA.PyMethodBase.PyInitialize() + + # Load data + if not isfile('tmva_reg_example.root'): + call(['curl', '-L', '-O', 'http://root.cern/files/tmva_reg_example.root']) + + # Generate model + create_model() + + # Run TMVA + run() diff --git a/tutorials/tree/run_h1analysis.C b/tutorials/tree/run_h1analysis.C index a64aeea1392c9..a5b42e92e4771 100644 --- a/tutorials/tree/run_h1analysis.C +++ b/tutorials/tree/run_h1analysis.C @@ -25,7 +25,7 @@ void run_h1analysis(int type = 0, const char * h1dir = 0) { gSystem->Setenv("H1",h1dir); } else - gSystem->Setenv("H1","http://root.cern/files/h1/"); + gSystem->Setenv("H1","root://eospublic.cern.ch//eos/root-eos/h1/"); std::cout << "Creating the chain" << std::endl; diff --git a/tutorials/v7/ntuple/ntpl001_staff.C b/tutorials/v7/ntuple/ntpl001_staff.C index 6b32cf103b0d5..fc98d78d0c848 100644 --- a/tutorials/v7/ntuple/ntpl001_staff.C +++ b/tutorials/v7/ntuple/ntpl001_staff.C @@ -12,8 +12,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. #include #include diff --git a/tutorials/v7/ntuple/ntpl002_vector.C b/tutorials/v7/ntuple/ntpl002_vector.C index fbfc2a332bd7c..1f1f8ae37bd95 100644 --- a/tutorials/v7/ntuple/ntpl002_vector.C +++ b/tutorials/v7/ntuple/ntpl002_vector.C @@ -10,8 +10,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. #include #include diff --git a/tutorials/v7/ntuple/ntpl004_dimuon.C b/tutorials/v7/ntuple/ntpl004_dimuon.C index df526b8fddd89..cbd16354675fb 100644 --- a/tutorials/v7/ntuple/ntpl004_dimuon.C +++ b/tutorials/v7/ntuple/ntpl004_dimuon.C @@ -14,8 +14,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. 
#include #include @@ -37,7 +36,7 @@ // Import classes from experimental namespace for the time being using RNTupleDS = ROOT::Experimental::RNTupleDS; -constexpr char const *kNTupleFileName = "http://root.cern/files/tutorials/ntpl004_dimuon_v1rc3.root"; +constexpr char const *kNTupleFileName = "http://root.cern/files/tutorials/ntpl004_dimuon_v1.root"; using namespace ROOT::VecOps; diff --git a/tutorials/v7/ntuple/ntpl005_introspection.C b/tutorials/v7/ntuple/ntpl005_introspection.C index 6f5f698c50c84..4dc090f126751 100644 --- a/tutorials/v7/ntuple/ntpl005_introspection.C +++ b/tutorials/v7/ntuple/ntpl005_introspection.C @@ -11,8 +11,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. #include #include diff --git a/tutorials/v7/ntuple/ntpl006_friends.C b/tutorials/v7/ntuple/ntpl006_friends.C index d47167352a7e7..7525f8b8f85d7 100644 --- a/tutorials/v7/ntuple/ntpl006_friends.C +++ b/tutorials/v7/ntuple/ntpl006_friends.C @@ -10,8 +10,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. #include #include diff --git a/tutorials/v7/ntuple/ntpl007_mtFill.C b/tutorials/v7/ntuple/ntpl007_mtFill.C index b9957539e547e..5e4558b893e90 100644 --- a/tutorials/v7/ntuple/ntpl007_mtFill.C +++ b/tutorials/v7/ntuple/ntpl007_mtFill.C @@ -10,8 +10,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. #include #include diff --git a/tutorials/v7/ntuple/ntpl008_import.C b/tutorials/v7/ntuple/ntpl008_import.C index c5638457788ae..f03f443d29f43 100644 --- a/tutorials/v7/ntuple/ntpl008_import.C +++ b/tutorials/v7/ntuple/ntpl008_import.C @@ -10,8 +10,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. #include #include diff --git a/tutorials/v7/ntuple/ntpl009_parallelWriter.C b/tutorials/v7/ntuple/ntpl009_parallelWriter.C index 186fcb5096f8e..a6ba4c2801622 100644 --- a/tutorials/v7/ntuple/ntpl009_parallelWriter.C +++ b/tutorials/v7/ntuple/ntpl009_parallelWriter.C @@ -10,8 +10,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. #include #include diff --git a/tutorials/v7/ntuple/ntpl010_skim.C b/tutorials/v7/ntuple/ntpl010_skim.C index 7ac16c614b5fa..84ca49b0d6724 100644 --- a/tutorials/v7/ntuple/ntpl010_skim.C +++ b/tutorials/v7/ntuple/ntpl010_skim.C @@ -10,8 +10,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. 
#include #include diff --git a/tutorials/v7/ntuple/ntpl011_global_temperatures.C b/tutorials/v7/ntuple/ntpl011_global_temperatures.C index 233f3943d20d7..49f85017534f9 100644 --- a/tutorials/v7/ntuple/ntpl011_global_temperatures.C +++ b/tutorials/v7/ntuple/ntpl011_global_temperatures.C @@ -14,8 +14,8 @@ /// \author John Yoon // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! During ROOT setup, configure the following flags: +// Functionality and interface are still subject to changes. +// During ROOT setup, configure the following flags: // `-DCMAKE_CXX_STANDARD=17 -Droot7=ON -Dwebgui=ON` #include diff --git a/tutorials/v7/ntuple/ntpl012_processor.C b/tutorials/v7/ntuple/ntpl012_processor.C index 39817fc3bd5be..b7d1c934a5624 100644 --- a/tutorials/v7/ntuple/ntpl012_processor.C +++ b/tutorials/v7/ntuple/ntpl012_processor.C @@ -10,8 +10,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. #include #include diff --git a/tutorials/v7/ntuple/ntpl013_staged.C b/tutorials/v7/ntuple/ntpl013_staged.C index 2f3ec1cdbac6e..75d5ffcc80e8b 100644 --- a/tutorials/v7/ntuple/ntpl013_staged.C +++ b/tutorials/v7/ntuple/ntpl013_staged.C @@ -9,8 +9,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. #include #include diff --git a/tutorials/v7/ntuple/ntpl014_framework.C b/tutorials/v7/ntuple/ntpl014_framework.C index eda411e79e4c8..bc326ba91f120 100644 --- a/tutorials/v7/ntuple/ntpl014_framework.C +++ b/tutorials/v7/ntuple/ntpl014_framework.C @@ -25,8 +25,7 @@ /// \author The ROOT Team // NOTE: The RNTuple classes are experimental at this point. -// Functionality, interface, and data format is still subject to changes. -// Do not use for real data! +// Functionality and interface are still subject to changes. 
#include #include diff --git a/tutorials/webgui/ping/ping.cxx b/tutorials/webgui/ping/ping.cxx index 82c91a01b229d..8a1a73799ffcc 100644 --- a/tutorials/webgui/ping/ping.cxx +++ b/tutorials/webgui/ping/ping.cxx @@ -125,7 +125,8 @@ void ping(int nclients = 1, int test_mode = 0) // create window window = ROOT::RWebWindow::Create(); - // configure maximal number of clients which allowed to connect + // configure the number of clients that are allowed to connect + ROOT::RWebWindowsManager::SetSingleConnMode(false); window->SetConnLimit(num_clients); // configure default html page diff --git a/ui5/canv/controller/Canvas.controller.js b/ui5/canv/controller/Canvas.controller.js index c96badc6368b2..7ba41c86313dc 100644 --- a/ui5/canv/controller/Canvas.controller.js +++ b/ui5/canv/controller/Canvas.controller.js @@ -408,7 +408,7 @@ sap.ui.define([ if (!split || (!curr && !panel_name) || (curr === panel_name)) return Promise.resolve(null); - const adjust_window_width = is_flex ^ was_flex; + const adjust_window_width = (is_flex ^ was_flex) && !canvp?._ignore_section_resize; if (adjust_window_width) can_elem?.getController().rememberAreaSize(); @@ -580,23 +580,31 @@ sap.ui.define([ this.getView().getModel().setProperty('/StatusIcon', chk_icon(new_state)); if (this.isStatusShown() != new_state) { - // restore size after next resize - this.getView().byId('MainPanel')?.getController().rememberAreaSize(); - this._Page.setShowFooter(new_state); const canvp = this.getCanvasPainter(); + // restore canvas size after getting next resize event + if (!canvp?._ignore_section_resize) + this.getView().byId('MainPanel')?.getController().rememberAreaSize(); + this._Page.setShowFooter(new_state); canvp?.processChanges('sbits', canvp); } }, + isToolBarShown() { + return this._Page.getShowSubHeader(); + }, + toggleToolBar(new_state) { + const old_state = this.isToolBarShown(); if (new_state === undefined) - new_state = this.getView().getModel().getProperty('/ToolbarIcon') === chk_icon(false); - - this.getView().byId('MainPanel')?.getController().rememberAreaSize(); - - this._Page.setShowSubHeader(new_state); - - this.getView().getModel().setProperty('/ToolbarIcon', chk_icon(new_state)); + new_state = !old_state; + + if (new_state !== old_state) { + // restore canvas size after getting next resize event + if (!this.getCanvasPainter()?._ignore_section_resize) + this.getView().byId('MainPanel')?.getController().rememberAreaSize(); + this._Page.setShowSubHeader(new_state); + this.getView().getModel().setProperty('/ToolbarIcon', chk_icon(new_state)); + } }, toggleToolTip(new_state) { diff --git a/ui5/canv/controller/Ged.controller.js b/ui5/canv/controller/Ged.controller.js index b8ba214d88da5..37e8669d577b8 100644 --- a/ui5/canv/controller/Ged.controller.js +++ b/ui5/canv/controller/Ged.controller.js @@ -43,12 +43,15 @@ sap.ui.define([ this.getView().byId('ged_page').removeAllContent(); // set dummy model - this.getView().setModel(new JSONModel({ SelectedClass: 'none' })); + this.getView().setModel(new JSONModel({ SelectedClass: 'none', SelectedPlace: '' })); // remove references this.currentPainter = null; this.currentPadPainter = null; this.currentPlace = undefined; + this.currentHistPainter = null; + this.currentHistAxis = ''; + + // TODO: deregister for all events }, @@ -85,7 +88,6 @@ sap.ui.define([ /// data object includes _kind, _painter and _handle (optionally) modelPropertyChange(evnt, data) { let pars = evnt.getParameters(); - console.log('Model property changes', pars.path, pars.value, data._kind); if (data._handle) { if
(typeof data._handle.verifyDirectChange === 'function') @@ -140,32 +142,36 @@ sap.ui.define([ }, getAxisHandle() { - if (this.currentPainter) - switch (this.currentPlace) { - case 'xaxis': return this.currentPainter.x_handle; - case 'yaxis': return this.currentPainter.y_handle; - case 'zaxis': return this.currentPainter.z_handle; - } - return null; + const fp = this.currentPainter?.getFramePainter(); + if (!fp || !this.currentPlace) + return this.currentPainter; + + let found = null; + ['x_handle','y_handle','z_handle','x2_handle', 'y2_handle', 'z2_handle'].forEach(name => { + const handle = fp[name]; + if ((handle?.hist_painter === this.currentPainter) && (handle?.hist_axis === this.currentPlace)) + found = handle; + }); + + return found; }, setAxisModel(model) { - let obj, painter, is_gaxis = !this.currentPlace, axis_chopt = '', axis_ticksize = 0, color_title = ''; - if (is_gaxis) { - painter = this.currentPainter; - obj = painter.getObject(); + let painter = this.getAxisHandle(), + obj = painter?.getObject(), + axis_chopt = '', axis_ticksize = 0, color_title = ''; + if (painter?.is_gaxis) { axis_chopt = obj.fChopt; axis_ticksize = obj.fTickSize; color_title = this.currentPadPainter.getColor(obj.fTextColor); } else { - obj = this.currentPainter.getObject(this.currentPlace); - painter = this.getAxisHandle(); axis_ticksize = obj.fTickLength; color_title = this.currentPadPainter.getColor(obj.fTitleColor); } let data = { - is_gaxis, + is_gaxis: painter?.is_gaxis, + mode2d: !painter.hist_painter?.options.Mode3D, specialRefresh: 'setAxisModel', axis: obj, axis_chopt, @@ -174,9 +180,11 @@ sap.ui.define([ color_label: this.currentPadPainter.getColor(obj.fLabelColor), center_label: obj.TestBit(this.getAxisBit('kCenterLabels')), vert_label: obj.TestBit(this.getAxisBit('kLabelsVert')), + font_label: painter.labelsFont?.index || 0, color_title, center_title: obj.TestBit(this.getAxisBit('kCenterTitle')), rotate_title: obj.TestBit(this.getAxisBit('kRotateTitle')), + font_title: painter.titleFont?.index || 0, }; model.setData(data); @@ -185,21 +193,19 @@ sap.ui.define([ processAxisModelChange(evnt /*, data */) { let pars = evnt.getParameters(), item = pars.path.substr(1), + handle = this.getAxisHandle(), + axis = handle?.getObject(), + is_gaxis = handle?.is_gaxis, + kind = '', exec = '', - painter = this.currentPainter, - kind = this.currentPlace, - is_gaxis = !kind, - axis = painter.getObject(kind), - col; + col, fontid; // while axis painter is temporary object, we should not try change it attributes + if (!this.currentPadPainter || !axis) + return; - if (!this.currentPadPainter || !axis) return; - - if (!is_gaxis && (typeof kind == 'string') && (kind.indexOf('axis') == 1)) - kind = kind.slice(0,1); - else - kind = ''; + if (!is_gaxis && handle?.hist_painter && handle?.hist_axis) + kind = handle.hist_axis; switch(item) { case 'axis/fTitle': @@ -211,11 +217,11 @@ sap.ui.define([ axis.fLineColor = col; else axis.fAxisColor = col; - exec = this.getColorExec(painter, pars.value, is_gaxis ? 'SetLineColor' : 'SetAxisColor'); + exec = this.getColorExec(this.currentPadPainter, pars.value, is_gaxis ? 
'SetLineColor' : 'SetAxisColor'); break; case 'color_label': axis.fLabelColor = this.currentPadPainter.addColor(pars.value); - exec = this.getColorExec(painter, pars.value, 'SetLabelColor'); + exec = this.getColorExec(this.currentPadPainter, pars.value, 'SetLabelColor'); break; case 'center_label': axis.InvertBit(this.getAxisBit('kCenterLabels')); @@ -225,6 +231,11 @@ sap.ui.define([ axis.InvertBit(this.getAxisBit('kLabelsVert')); exec = `exec:SetBit(TAxis::kLabelsVert, ${pars.value ? true : false})`; break; + case 'font_label': + fontid = parseInt(pars.value)*10 + 2; + axis.fLabelFont = fontid; + exec = `exec:SetLabelFont(${fontid})`; + break; case 'axis/fLabelOffset': exec = `exec:SetLabelOffset(${pars.value})`; break; @@ -237,7 +248,7 @@ sap.ui.define([ axis.fTextColor = col; else axis.fTitleColor = col; - exec = this.getColorExec(painter, pars.value, 'SetTitleColor'); + exec = this.getColorExec(this.currentPadPainter, pars.value, 'SetTitleColor'); break; case 'center_title': axis.InvertBit(this.getAxisBit('kCenterTitle')); @@ -247,6 +258,14 @@ sap.ui.define([ axis.InvertBit(this.getAxisBit('kRotateTitle')); exec = is_gaxis ? `exec:SetBit(TAxis::kRotateTitle, ${pars.value ? true : false})` : `exec:RotateTitle(${pars.value ? true : false})`; break; + case 'font_title': + fontid = parseInt(pars.value)*10 + 2; + if (is_gaxis) + axis.fTextFont = fontid; + else + axis.fTitleFont = fontid; + exec = `exec:SetTitleFont(${fontid})`; + break; case 'axis_ticksize': if (is_gaxis) axis.fTickSize = pars.value; @@ -267,7 +286,7 @@ sap.ui.define([ } // TAxis belongs to main painter like TH1, therefore submit commands there - let main = is_gaxis ? painter : this.currentPainter.getMainPainter(true); + const main = is_gaxis ? handle : handle.hist_painter; if (main?.snapid) main.interactiveRedraw('pad', exec, kind); @@ -281,7 +300,8 @@ sap.ui.define([ let data = { logbase: handle.logbase || 0, - handle: handle, + handle, + mode2d: true, ticks_size: handle.ticksSize/handle.scaling_size, labels_offset: handle.labelsOffset/handle.scaling_size, labels_rotate: handle.labelsFont.angle != 0, @@ -323,19 +343,29 @@ sap.ui.define([ opts.Mode3D = opts.Mode3Dindx > 0; opts.Lego = parseInt(opts.Lego); + opts.Surf = parseInt(opts.Surf); let cl = this.getView().getModel().getProperty('/SelectedClass'); if ((typeof cl == 'string') && opts.Mode3D && (cl.indexOf('ROOT::Experimental::RHist') == 0)) opts.Lego = 12; - opts.Contor = parseInt(opts.Contor); + opts.Contour = parseInt(opts.Contour); opts.ErrorKind = parseInt(opts.ErrorKind); + opts.BoxStyle = parseInt(opts.BoxStyle); + opts.GLBox = parseInt(opts.GLBox); this.currentPainter?.interactiveRedraw('pad', 'drawopt'); }, - async onObjectSelect(padpainter, painter, place) { + async onObjectSelect(padpainter, painter) { + let place = ''; + if (painter.hist_painter && painter.hist_axis) { + // always keep reference on hist painter for x/y/z axis + // while axis painter is temporary and can change at any time + place = painter.hist_axis; + painter = painter.hist_painter; + } if ((this.currentPainter === painter) && (place === this.currentPlace)) return; @@ -343,22 +373,15 @@ sap.ui.define([ this.currentPainter = painter; this.currentPlace = place; - let obj = painter.getObject(place), selectedClass = ''; - - if (place == 'xaxis' && painter.x_handle) { - painter = painter.x_handle; - selectedClass = painter.getAxisType(); - } else if (place == 'yaxis' && painter.y_handle) { - painter = painter.y_handle; - selectedClass = painter.getAxisType(); - } else if (place == 'zaxis' && 
painter.z_handle) { - painter = painter.z_handle; - selectedClass = painter.getAxisType(); - } else { - selectedClass = obj ? obj._typename : painter.getObjectHint(); - } + if (place) + painter = this.getAxisHandle(); + + let obj = painter.getObject(), + selectedClass = obj?._typename ?? painter.getObjectHint(); + this.getView().getModel().setProperty('/SelectedClass', selectedClass); + this.getView().getModel().setProperty('/SelectedPlace', place); let oPage = this.getView().byId('ged_page'); oPage.removeAllContent(); @@ -371,7 +394,8 @@ sap.ui.define([ return; } - if (painter.lineatt?.used && !painter.lineatt.not_standard) { + if (painter.lineatt?.used && (selectedClass !== 'TAxis') && (selectedClass !== 'TGaxis')) { + console.log('Assign line attributes', painter.getObject()._typename, 'not_standard', painter.lineatt.not_standard); let model = new JSONModel( { attline: painter.lineatt } ); model.attachPropertyChange({ _kind: 'TAttLine', _painter: painter, _handle: painter.lineatt }, this.modelPropertyChange, this); await this.addFragment(oPage, 'TAttLine', model); @@ -408,25 +432,21 @@ sap.ui.define([ } if (selectedClass == 'TAxis') { - let model = new JSONModel({ is_taxis: true }); + let model = new JSONModel({ is_gaxis: false }); this.setAxisModel(model); model.attachPropertyChange({ _kind: 'TAxis' }, this.processAxisModelChange, this); await this.addFragment(oPage, 'Axis', model); - } - - if (selectedClass == 'TGaxis') { - let model = new JSONModel({ is_taxis: false }); + } else if (selectedClass == 'TGaxis') { + let model = new JSONModel({ is_gaxis: true }); this.setAxisModel(model); model.attachPropertyChange({ _kind: 'TGaxis' }, this.processAxisModelChange, this); await this.addFragment(oPage, 'Axis', model); - } - - if (typeof painter.getHisto == 'function') { + } else if (typeof painter.getHisto == 'function') { painter.options.Mode3Dindx = painter.options.Mode3D ? 
1 : 0; painter.options.Error = !!painter.options.Error; painter.options.Palette = !!painter.options.Palette; painter.options.Zero = !!painter.options.Zero; - let model = new JSONModel({ opts : painter.options }); + let model = new JSONModel({ opts: painter.options }); model.attachPropertyChange({ options: painter.options }, this.processHistModelChange, this); await this.addFragment(oPage, 'Hist', model); } @@ -459,7 +479,7 @@ sap.ui.define([ if (!evnt) return; if (evnt.what == 'select') - this.onObjectSelect(evnt.padpainter, evnt.painter, evnt.place); + this.onObjectSelect(evnt.padpainter, evnt.painter); else if (evnt.what == 'redraw') this.onObjectRedraw(evnt.padpainter, evnt.painter); else if (evnt.what == 'padredraw') diff --git a/ui5/canv/view/Axis.fragment.xml b/ui5/canv/view/Axis.fragment.xml index 3a7a0befa92dd..1a26379a59135 100644 --- a/ui5/canv/view/Axis.fragment.xml +++ b/ui5/canv/view/Axis.fragment.xml @@ -16,18 +16,50 @@ - + + + + + + + + + + + + + + - - - - + @@ -35,18 +67,50 @@ - + + + + + + + + + + + + + + - - - - + @@ -54,7 +118,7 @@ - + diff --git a/ui5/canv/view/Ged.view.xml b/ui5/canv/view/Ged.view.xml index bdc0548593f9b..13fcbfc67a976 100644 --- a/ui5/canv/view/Ged.view.xml +++ b/ui5/canv/view/Ged.view.xml @@ -4,17 +4,6 @@ controllerName="rootui5.canv.controller.Ged" xmlns:mvc="sap.ui.core.mvc" xmlns="sap.m"> - - + \ No newline at end of file diff --git a/ui5/canv/view/Hist.fragment.xml b/ui5/canv/view/Hist.fragment.xml index 3a870ed73ab94..c34b91c29d365 100644 --- a/ui5/canv/view/Hist.fragment.xml +++ b/ui5/canv/view/Hist.fragment.xml @@ -3,34 +3,40 @@ xmlns:l="sap.ui.layout" xmlns:core="sap.ui.core"> - - - - - - + + + + + + + + + + + @@ -39,60 +45,113 @@ - - - - + + + + + + - - - - + + + + + - + + - - - - + + + + + + + + + + - + + + + + + + + + + + + + + + + + diff --git a/ui5/eve7/controller/ClientLog.controller.js b/ui5/eve7/controller/ClientLog.controller.js index 7dd58c8e70a9d..7c72b7ad4b795 100644 --- a/ui5/eve7/controller/ClientLog.controller.js +++ b/ui5/eve7/controller/ClientLog.controller.js @@ -40,12 +40,13 @@ sap.ui.define([ this.oMessageView.addStyleClass("sapUiSizeCompact"); + let cntrlr = this; let oBackButton = new Button({ icon: "sap-icon://nav-back", visible: false, press: function () { - that.oMessageView.navigateBack(); + cntrlr.oMessageView.navigateBack(); this.setVisible(false); } }); diff --git a/ui5/eve7/lib/GlViewerRCore.js b/ui5/eve7/lib/GlViewerRCore.js index d0f7ab178f606..cb2b4d5c985db 100644 --- a/ui5/eve7/lib/GlViewerRCore.js +++ b/ui5/eve7/lib/GlViewerRCore.js @@ -91,11 +91,11 @@ sap.ui.define([ this.RQ_MarkerScale * this.canvas.pixelRatio, this.RQ_LineScale * this.canvas.pixelRatio); } - this.updateViewerAttributes(); this.controller.createScenes(); this.controller.redrawScenes(); this.setupEventHandlers(); + this.updateViewerAttributes(); this.controller.glViewerInitDone(); } @@ -181,7 +181,8 @@ sap.ui.define([ // guides this.axis = new RC.Group(); this.axis.name = "Axis"; - this.overlay_scene.add(this.axis); + // this.overlay_scene.add(this.axis); // looks worse for now put to scene + this.scene.add(this.axis); if (this.controller.isEveCameraPerspective()) { @@ -544,20 +545,25 @@ sap.ui.define([ else fs = val.toExponential(2); } - return fs; + return val > 0 ? 
"+" + fs : fs; } let bb = new RC.Box3(); bb.setFromObject(this.scene); - let lines = []; - lines.push({ "p": new RC.Vector3(bb.min.x, 0, 0), "c": new RC.Color(1, 0, 0), "text": "X " + formatFloat(bb.min.x) }); - lines.push({ "p": new RC.Vector3(bb.max.x, 0, 0), "c": new RC.Color(1, 0, 0), "text": "X " + formatFloat(bb.max.x) }); - lines.push({ "p": new RC.Vector3(0, bb.min.y, 0), "c": new RC.Color(0, 1, 0), "text": "Y " + formatFloat(bb.min.y) }); - lines.push({ "p": new RC.Vector3(0, bb.max.y, 0), "c": new RC.Color(0, 1, 0), "text": "Y " + formatFloat(bb.max.y) }); - lines.push({ "p": new RC.Vector3(0, 0, bb.min.z), "c": new RC.Color(0, 0, 1), "text": "Z " + formatFloat(bb.min.z) }); - lines.push({ "p": new RC.Vector3(0, 0, bb.max.z), "c": new RC.Color(0, 0, 1), "text": "Z " + formatFloat(bb.max.z) }); + console.log(formatFloat(bb.max.x), formatFloat(bb.min.x), + formatFloat(bb.max.y), formatFloat(bb.min.y), + formatFloat(bb.max.z), formatFloat(bb.min.z)); + let lines = []; + lines.push({ "p": new RC.Vector3(bb.min.x, 0, 0), "c": new RC.Color(1, 0, 0), "text": "x " + formatFloat(bb.min.x) }); + lines.push({ "p": new RC.Vector3(bb.max.x, 0, 0), "c": new RC.Color(1, 0, 0), "text": "x " + formatFloat(bb.max.x) }); + lines.push({ "p": new RC.Vector3(0, bb.min.y, 0), "c": new RC.Color(0, 1, 0), "text": "y " + formatFloat(bb.min.y) }); + lines.push({ "p": new RC.Vector3(0, bb.max.y, 0), "c": new RC.Color(0, 1, 0), "text": "y " + formatFloat(bb.max.y) }); + if (this.controller.isEveCameraPerspective()) { + lines.push({ "p": new RC.Vector3(0, 0, bb.min.z), "c": new RC.Color(0, 0, 1), "text": "z " + formatFloat(bb.min.z) }); + lines.push({ "p": new RC.Vector3(0, 0, bb.max.z), "c": new RC.Color(0, 0, 1), "text": "z " + formatFloat(bb.max.z) }); + } for (const ax of lines) { let geom = new RC.Geometry(); @@ -567,27 +573,27 @@ sap.ui.define([ this.axis.add(ss); } - let url_base = this.eve_path + 'fonts/LiberationSans-Regular'; + let url_base = this.eve_path + 'sdf-fonts/LiberationSerif-Regular'; this.tex_cache.deliver_font(url_base, (texture, font_metrics) => { let diag = new RC.Vector3; bb.getSize(diag); - diag = diag.length() / 40; + diag = diag.length() / 100; let ag = this.axis; for (const ax of lines) { const text = new RC.ZText({ text: ax.text, fontTexture: texture, - xPos: 0, - yPos: 0, - fontSize: diag, - mode: RC.TEXT2D_SPACE_WORLD, + xPos: 0.0, + yPos: 0.0, + fontSize: 0.01, + mode: RC.TEXT2D_SPACE_MIXED, fontHinting: 1.0, color: this.fgCol, font: font_metrics, }); - text.position.copy(ax.p); - text.material.side = RC.FRONT_AND_BACK_SIDE; + text.position = ax.p; + text.material.side = RC.FRONT_SIDE; ag.add(text); } }, diff --git a/ui5/eve7/view/Summary.view.xml b/ui5/eve7/view/Summary.view.xml index 4905b11a369ec..94560cedcf375 100644 --- a/ui5/eve7/view/Summary.view.xml +++ b/ui5/eve7/view/Summary.view.xml @@ -4,7 +4,7 @@ xmlns:l="sap.ui.layout" xmlns="sap.m" class="sapUiSizeCompact"> - +