diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml index 220149b8..6c058500 100644 --- a/.github/workflows/Downgrade.yml +++ b/.github/workflows/Downgrade.yml @@ -16,11 +16,10 @@ jobs: strategy: matrix: version: - - '1.6' - - '^1.6' + - '1.10.0' steps: - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@v1 + - uses: julia-actions/setup-julia@v2 with: version: ${{ matrix.version }} - uses: cjdoris/julia-downgrade-compat-action@v1 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5d532072..d1ddbae4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,16 +12,12 @@ jobs: fail-fast: false matrix: version: - - '1.6' - 'lts' - 'pre' os: - ubuntu-latest - macOS-latest - windows-latest - exclude: - - version: '1.6' - os: macOS-latest steps: - uses: actions/checkout@v4 - uses: julia-actions/setup-julia@v2 diff --git a/Project.toml b/Project.toml index dc2114d6..d50ce5d6 100644 --- a/Project.toml +++ b/Project.toml @@ -25,21 +25,21 @@ Compat = "4.14.0" DocStringExtensions = "0.9" Documenter = "1.2.1" ForwardDiff = "0.10.13" -LinearAlgebra = "1.6" +LinearAlgebra = "1.10" Measurements = "2.11" NearestNeighbors = "0.4.16" -PDBTools = "1.1" +PDBTools = "2" Parameters = "0.12" -PrecompileTools = "1" +PrecompileTools = "1.2.1" ProgressMeter = "1.6" -Random = "1.6" +Random = "1.10" Setfield = "0.7, 0.8, 0.9, 1" StaticArrays = "1.6" -Test = "1.6" +Test = "1.10" TestItemRunner = "0.2" TestItems = "0.1, 1" Unitful = "1.19" -julia = "1.6" +julia = "1.10" [extras] Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" diff --git a/README.md b/README.md index 307f80d1..ee2dabef 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,8 @@ USER GUIDE:
## Installation -Download and install Julia for your platform from [this http url](https://julialang.org/downloads/). Version 1.6 or greater is required. +Download and install Julia for your platform from [this http url](https://julialang.org/downloads/). +Version 1.10 or greater is required. Install it as usual for registered Julia packages: diff --git a/docs/Project.toml b/docs/Project.toml index 4d63afae..d69c78d6 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,4 +1,5 @@ [deps] +BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" CellListMap = "69e1c6dd-3888-40e6-b3c8-31ac5f578864" Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" diff --git a/docs/src/LowLevel.md b/docs/src/LowLevel.md index fddd8486..5660e31a 100644 --- a/docs/src/LowLevel.md +++ b/docs/src/LowLevel.md @@ -610,7 +610,7 @@ CollapsedDocStrings = true ```@autodocs Modules = [CellListMap] -Pages = ["Box.jl", "CellListMap.jl", "CellLists.jl", "CellOperations.jl", "CoreComputing.jl"] +Pages = ["Box.jl", "CellListMap.jl", "CellLists.jl", "CellOperations.jl", "./core_computing/self.jl", "./core_computing/cross.jl"] Order = [:type, :function] ``` diff --git a/docs/src/ParticleSystem.md b/docs/src/ParticleSystem.md index e0ba780f..b97e0e6f 100644 --- a/docs/src/ParticleSystem.md +++ b/docs/src/ParticleSystem.md @@ -107,7 +107,7 @@ The `ParticleSystem` constructor receives the properties of the system and sets ```julia-repl julia> using CellListMap, PDBTools -julia> argon_coordinates = coor(readPDB(CellListMap.argon_pdb_file)) +julia> argon_coordinates = coor(read_pdb(CellListMap.argon_pdb_file)) julia> system = ParticleSystem( xpositions=argon_coordinates, @@ -168,7 +168,7 @@ Now, let us setup the system with the new type of output variable, which will be ```julia-repl julia> using CellListMap, PDBTools -julia> argon_coordinates = coor(readPDB(CellListMap.argon_pdb_file)) +julia> argon_coordinates = coor(read_pdb(CellListMap.argon_pdb_file)) julia> system = ParticleSystem( xpositions=argon_coordinates, @@ -285,7 +285,7 @@ end To finally define the system and compute the properties: ```julia -argon_coordinates = coor(readPDB(CellListMap.argon_pdb_file)) +argon_coordinates = coor(read_pdb(CellListMap.argon_pdb_file)) system = ParticleSystem( xpositions = argon_coordinates, @@ -392,6 +392,15 @@ be attempted. The unit cell can be updated to new dimensions at any moment, with the `update_unitcell!` function: ```julia-repl +julia> using CellListMap, StaticArrays + +julia> system = ParticleSystem(; + positions=rand(SVector{3,Float64}, 1000), + unitcell=[1.0, 1.0, 1.0], + cutoff=0.1, + output = 0.0, + ); + julia> update_unitcell!(system, SVector(1.2, 1.2, 1.2)) ParticleSystem1 of dimension 3, composed of: Box{OrthorhombicCell, 3} @@ -408,6 +417,7 @@ ParticleSystem1 of dimension 3, composed of: Number of batches for cell list construction: 8 Number of batches for function mapping: 12 Type of output variable (forces): Vector{SVector{3, Float64}} + ``` !!! 
note
@@ -425,30 +435,30 @@ ParticleSystem1 of dimension 3, composed of:

The cutoff can also be updated, using the `update_cutoff!` function:
```julia-repl
+julia> using CellListMap, StaticArrays
+
+julia> system = ParticleSystem(;
+           positions=rand(SVector{3,Float64}, 1000),
+           unitcell=[1.0, 1.0, 1.0],
+           cutoff=0.1,
+           output = 0.0,
+       );
+
julia> update_cutoff!(system, 0.2)
-ParticleSystem1 of dimension 3, composed of:
+ParticleSystem1{output} of dimension 3, composed of:
    Box{OrthorhombicCell, 3}
-      unit cell matrix = [ 1.0, 0.0, 0.0; 0.0, 1.0, 0.0; 0.0, 0.0, 1.0 ]
+      unit cell matrix = [ 1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0 ]
      cutoff = 0.2
-      number of computing cells on each dimension = [7, 7, 7]
+      number of computing cells on each dimension = [8, 8, 8]
      computing cell sizes = [0.2, 0.2, 0.2] (lcell: 1)
-      Total number of cells = 343
-    CellListMap.CellList{3, Float64}
+      Total number of cells = 512
+    CellList{3, Float64}
      1000 real particles.
-      125 cells with real particles.
-      2792 particles in computing box, including images.
-    Parallelization auxiliary data set for:
-      Number of batches for cell list construction: 8
-      Number of batches for function mapping: 8
-    Type of output variable (forces): Vector{SVector{3, Float64}}
+      636 cells with real particles.
+      1738 particles in computing box, including images.
+    Parallelization auxiliary data set for 8 batch(es).
+    Type of output variable (output): Float64
-julia> map_pairwise!((x,y,i,j,d2,forces) -> update_forces!(x,y,i,j,d2,forces), system)
-1000-element Vector{SVector{3, Float64}}:
- [306.9612911344924, -618.7375562535321, -607.1449767066479]
- [224.0803003775478, -241.05319348787023, 67.53780411933884]
- ⋮
- [2114.4873184508524, -3186.265279868732, -6777.748445712408]
- [-25.306486853608945, 119.69319481834582, 104.1501577339471]
```

## Computations for two sets of particles
@@ -633,28 +643,15 @@ map_pairwise((x,y,i,j,d2,u) -> u += d2, system)
map_pairwise((x,y,i,j,d2,u) -> u += sqrt(d2), system; update_lists = false)
```
in which case we are computing the sum of distances from the same cell lists used to compute the energy in the previous example
-(requires version 0.8.9). Specifically, this will skip the updating of the cell lists, thus be careful to not use this
-option if the cutoff, unitcell, or any other property of the system changed.
+(requires version 0.8.9).

-For systems with two sets of particles, the
-coordinates of the `xpositions` set can be updated, preserving the cell lists computed for the `ypositions`, but this requires
-setting `autoswap=false` in the construction of the `ParticleSystem`:
-```julia
-using CellListMap, StaticArrays
-system = ParticleSystem(
-    xpositions=rand(SVector{3,Float64},1000),
-    ypositions=rand(SVector{3,Float64},2000),
-    output=0.0, cutoff=0.1, unitcell=[1,1,1],
-    autoswap=false # Cell lists are constructed for ypositions
-)
-map_pairwise((x,y,i,j,d2,u) -> u += d2, system)
-# Second run: preserve the cell lists but compute a different property
-map_pairwise((x,y,i,j,d2,u) -> u += sqrt(d2), system; update_lists = false)
-```
+!!! warning
+    This option will skip the updating of the cell lists, so be careful **not** to use this
+    option if the coordinates, cutoff, unitcell, or any other property of the system have changed.

### Control CellList cell size

-The cell sizes of the construction of the cell lists can be controled with the keyword `lcell`
+The size of the cells used in the construction of the cell lists can be controlled with the keyword `lcell`
of the `ParticleSystem` constructor.
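With `lcell=n`, the sides of the computing cells are `cutoff/n`, so larger values of `lcell` produce more, and smaller, cells (boundary padding cells are an internal detail). A minimal sketch of this relation, assumed from the `computing cell sizes` fields printed in the outputs of this section:

```julia
# Assumed relation between `lcell` and the computing cell size, consistent
# with the "computing cell sizes" fields printed in this section:
cutoff = 0.1
lcell = 2
cell_side = cutoff / lcell # 0.05 with lcell = 2; 0.1 with the default lcell = 1
```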
For example:
```julia-repl
julia> system = ParticleSystem(
@@ -699,12 +696,10 @@ ParticleSystem2{output} of dimension 2, composed of:
      number of computing cells on each dimension = [13, 13]
      computing cell sizes = [0.1, 0.1] (lcell: 1)
      Total number of cells = 169
-    CellListMap.CellListPair{Vector{StaticArraysCore.SVector{2, Float64}}, 2, Float64, CellListMap.Swapped}
-       200 particles in the reference vector.
-       61 cells with real particles of target vector.
-    Parallelization auxiliary data set for:
-      Number of batches for cell list construction: 1
-      Number of batches for function mapping: 1
+    CellListMap.CellListPair{2, Float64}
+       63 cells with real particles of the smallest set.
+       85 cells with real particles of the largest set.
+    Parallelization auxiliary data set for 1 batch(es).
    Type of output variable (output): Float64
```
!!! warning
@@ -842,7 +837,7 @@ copy_output(md::MinimumDistance) = md
reset_output!(md::MinimumDistance) = MinimumDistance(0, 0, +Inf)
reducer!(md1::MinimumDistance, md2::MinimumDistance) = md1.d < md2.d ? md1 : md2
# Build system
-xpositions = rand(SVector{3,Float64},1000);
+xpositions = rand(SVector{3,Float64},100);
ypositions = rand(SVector{3,Float64},1000);
system = ParticleSystem(
    xpositions = xpositions,
@@ -852,10 +847,125 @@ system = ParticleSystem(
    output = MinimumDistance(0,0,+Inf),
    output_name = :minimum_distance,
)
+# Function following the required interface of the mapped function
+get_md(_, _, i, j, d2, md) = minimum_distance(i, j, d2, md)
# Compute the minimum distance
-map_pairwise((x,y,i,j,d2,md) -> minimum_distance(i,j,d2,md), system)
+map_pairwise(get_md, system)
+```
+
+In the above example, the function is used such that cell lists are constructed for both
+sets. There are situations where this is not optimal, in particular:
+
+1. When one of the sets is very small. In this case, constructing a cell list for the largest
+   set becomes the bottleneck. Therefore, it is better to construct a cell list for the smallest
+   set and loop over the particles of the largest set.
+2. When one of the sets is fixed and the second set is variable. In this case, it is better to
+   construct the cell list for the fixed set only and loop over the particles of the variable set
+   (a sketch of this pattern is shown at the end of this section).
+
+For dealing with these possibilities, an additional two-set interface is available, where one maps
+the computation over an array of particles relative to a previously computed cell list. Complementing
+the example above, we could compute the same minimum distance using:
+
+```julia
+# Construct the cell list system only for one of the sets: ypositions
+ysystem = ParticleSystem(
+    positions = ypositions,
+    unitcell=[1.0,1.0,1.0],
+    cutoff = 0.1,
+    output = MinimumDistance(0,0,+Inf),
+    output_name = :minimum_distance,
+)
+# obtain the minimum distance between xpositions and the cell lists in ysystem
+# Note the additional `xpositions` parameter in the call to map_pairwise.
+map_pairwise(get_md, xpositions, ysystem)
+```
+
+Additionally, if the `xpositions` are updated, we can compute the function relative to `ysystem` without
+having to update the cell lists:
+
+```julia-repl
+julia> xpositions = rand(SVector{3,Float64},100);
+
+julia> map_pairwise(get_md, xpositions, ysystem)
+MinimumDistance(67, 580, 0.008423693268450603)
+```
+
+while with the two-set cell list system one would need to update the cell lists for this new computation.
+
+!!! compat
+    The single-set cross-interaction was introduced in v0.10.0. It uses the method previously implemented
+    for all cross-interactions.
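In the second scenario above, the cell lists of the fixed set can be reused across many configurations of the variable set. A minimal sketch of that pattern, reusing the `get_md` function and the `ysystem` object defined above (the loop length and set size are illustrative):

```julia
# Reuse the fixed-set cell lists of `ysystem` (built once above) over several
# configurations of the variable set; no cell lists are rebuilt inside the loop.
for step in 1:5
    xpositions = rand(SVector{3,Float64}, 100) # new coordinates of the variable set
    md = map_pairwise(get_md, xpositions, ysystem)
    println("step = $step, minimum distance = ", md.d)
end
```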
+
+### Benchmarking of cross-interaction alternatives
+
+With the following functions we will benchmark the performance of the two alternatives for computing
+cross-set interactions, **including** the time required to build the cell lists (the initialization
+of the `ParticleSystem` objects):
+
+```julia
+# First alternative: compute cell lists for the two sets
+function two_set_celllist(xpositions, ypositions)
+    system = ParticleSystem(
+        xpositions = xpositions,
+        ypositions = ypositions,
+        unitcell=[1.0,1.0,1.0],
+        cutoff = 0.1,
+        output = MinimumDistance(0,0,+Inf),
+        output_name = :minimum_distance,
+    )
+    return map_pairwise(get_md, system)
+end
+# Second alternative: compute cell lists for one set
+function one_set_celllist(xpositions, ypositions)
+    system = ParticleSystem(
+        positions = ypositions,
+        unitcell=[1.0,1.0,1.0],
+        cutoff = 0.1,
+        output = MinimumDistance(0,0,+Inf),
+        output_name = :minimum_distance,
+    )
+    return map_pairwise(get_md, xpositions, system)
+end
+```
+
+If one of the sets is small, the one-set alternative is clearly faster, provided we
+construct the cell lists for the smaller set:
+
+```julia-repl
+julia> using BenchmarkTools
+
+julia> xpositions = rand(SVector{3,Float64}, 10^6);
+
+julia> ypositions = rand(SVector{3,Float64}, 100);
+
+julia> @btime one_set_celllist($xpositions, $ypositions) samples=1 evals=1
+  25.165 ms (1531 allocations: 575.72 KiB)
+MinimumDistance(65937, 63, 0.00044803040276614203)
+
+julia> @btime two_set_celllist($xpositions, $ypositions) samples=1 evals=1
+  207.129 ms (154794 allocations: 478.00 MiB)
+MinimumDistance(65937, 63, 0.00044803040276614203)
+```
+
+For much larger systems, though, the computation of the cell lists becomes less relevant and the first alternative
+might be the most favorable, even including the cell list updates:
+
+```julia-repl
+julia> @btime one_set_celllist($xpositions, $ypositions) samples=1 evals=1
+  12.196 s (153327 allocations: 478.02 MiB)
+MinimumDistance(627930, 889247, 7.59096139675071e-5)
+
+julia> @btime two_set_celllist($xpositions, $ypositions) samples=1 evals=1
+  2.887 s (306416 allocations: 952.00 MiB)
+MinimumDistance(627930, 889247, 7.59096139675071e-5)
```
+
+This performance advantage of the two-set cell lists arises because more interactions can be skipped
+by [precomputing properties of the cells involved](https://onlinelibrary.wiley.com/doi/full/10.1002/jcc.20563).
+On the other hand, when the lists are available
+for only one set, the loop over all the particles of the second set is mandatory. For small sets this
+loop is fast, making it preferable to the construction of their cell lists.
+
### Particle simulation

In this example, a complete particle simulation is illustrated, with a simple potential. The
example shows how particle positions and forces can be updated.
Run this diff --git a/docs/src/ecosystem.md b/docs/src/ecosystem.md index fc7265c5..49e71b5d 100644 --- a/docs/src/ecosystem.md +++ b/docs/src/ecosystem.md @@ -32,7 +32,7 @@ are in Angstroms, while the box size and cutoff are defined in nanometers: ```jldoctest ;filter = r"\d+" => "" julia> using CellListMap, Unitful, PDBTools -julia> positions = coor(readPDB(CellListMap.argon_pdb_file))u"Å"; +julia> positions = coor(read_pdb(CellListMap.argon_pdb_file))u"Å"; julia> system = ParticleSystem( positions = positions, @@ -53,7 +53,7 @@ julia> map_pairwise((x,y,i,j,d2,out) -> out += d2, system) ```jldoctest ;filter = r"\d+" => s"" julia> using CellListMap, Unitful, PDBTools -julia> positions = coor(readPDB(CellListMap.argon_pdb_file))u"Å"; +julia> positions = coor(read_pdb(CellListMap.argon_pdb_file))u"Å"; julia> cutoff = 0.8u"nm"; diff --git a/docs/src/neighborlists.md b/docs/src/neighborlists.md index 3f254abd..55c5384c 100644 --- a/docs/src/neighborlists.md +++ b/docs/src/neighborlists.md @@ -194,7 +194,6 @@ Additional optional parameters can be used in a `neighborlist` call: | `parallel` | `Bool` | `true` | turns on and off parallelization | | `show_progress` | `Bool` | `false` | turns on and off progress bar | | `nbatches` | `Tuple{Int,Int}` | `(0,0)` | Number of batches used in parallelization (see [here](@ref Number-of-batches)) | -| `autoswap` | `Bool` | `true` | (advanced) automatically choose set to construct the cell lists | ## Docstrings diff --git a/src/Box.jl b/src/Box.jl index 1ff44e8f..4d5fe39c 100644 --- a/src/Box.jl +++ b/src/Box.jl @@ -381,7 +381,7 @@ particles do not see images of each other. ```jldoctest ;filter = r"\\d+" => "" julia> using CellListMap, PDBTools -julia> x = coor(readPDB(CellListMap.argon_pdb_file)); +julia> x = coor(read_pdb(CellListMap.argon_pdb_file)); julia> box = Box(limits(x), 10.0) Box{NonPeriodicCell, 3} diff --git a/src/CellListMap.jl b/src/CellListMap.jl index dcdc58d3..e4e4497f 100644 --- a/src/CellListMap.jl +++ b/src/CellListMap.jl @@ -44,114 +44,12 @@ include("./Box.jl") include("./CellLists.jl") include("./CellOperations.jl") include("./ParticleSystem.jl") -include("./CoreComputing.jl") -""" - map_pairwise!( - f::Function, - output, - box::Box, - cl::CellList - ;parallel::Bool=true, - show_progress::Bool=false - ) - -This function will run over every pair of particles which are closer than -`box.cutoff` and compute the Euclidean distance between the particles, -considering the periodic boundary conditions given in the `Box` structure. -If the distance is smaller than the cutoff, a function `f` of the -coordinates of the two particles will be computed. - -The function `f` receives six arguments as input: -``` -f(x,y,i,j,d2,output) -``` -Which are the coordinates of one particle, the coordinates of the -second particle, the index of the first particle, the index of the second -particle, the squared distance between them, and the `output` variable. -It has also to return the same `output` variable. Thus, `f` may or not -mutate `output`, but in either case it must return it. With that, it is -possible to compute an average property of the distance of the particles -or, for example, build a histogram. The squared distance `d2` is computed -internally for comparison with the -`cutoff`, and is passed to the `f` because many times it is used for the -desired computation. - -## Example - -Computing the mean absolute difference in `x` position between random particles, -remembering the number of pairs of `n` particles is `n(n-1)/2`. 
The function does
-not use the indices or the distance, such that we remove them from the parameters
-by using a closure.
-
-```julia-repl
-julia> n = 100_000;
-
-julia> box = Box([250,250,250],10);
-
-julia> x = [ SVector{3,Float64}(sides .* rand(3)) for i in 1:n ];
-
-julia> cl = CellList(x,box);
-
-julia> f(x,y,sum_dx) = sum_dx + abs(x[1] - y[1])
-
-julia> normalization = N / (N*(N-1)/2) # (number of particles) / (number of pairs)
-
-julia> avg_dx = normalization * map_parwise!((x,y,i,j,d2,sum_dx) -> f(x,y,sum_dx), 0.0, box, cl)
-
-```
-
-"""
-function map_pairwise!(f::F, output, box::Box, cl::CellList;
-    # Parallelization options
-    parallel::Bool=true,
-    output_threaded=nothing,
-    reduce::Function=reduce,
-    show_progress::Bool=false,
-) where {F} # Needed for specialization for this function (avoids some allocations)
-    if parallel
-        output = map_pairwise_parallel!(
-            f,output,box,cl;
-            output_threaded=output_threaded,
-            reduce=reduce,
-            show_progress=show_progress
-        )
-    else
-        output = map_pairwise_serial!(f,output,box,cl,show_progress=show_progress)
-    end
-    return output
-end
-
-"""
-    map_pairwise!(f::Function,output,box::Box,cl::CellListPair)
-
-The same but to evaluate some function between pairs of the particles of the vectors.
-
-"""
-function map_pairwise!(f::F1, output, box::Box, cl::CellListPair{V,N,T,Swap};
-    # Parallelization options
-    parallel::Bool=true,
-    output_threaded=nothing,
-    reduce::F2=reduce,
-    show_progress::Bool=false
-) where {F1,F2,V,N,T,Swap} # F1, F2 Needed for specialization for these functions
-    if Swap == Swapped
-        fswap(x,y,i,j,d2,output) = f(y,x,j,i,d2,output)
-    else
-        fswap = f
-    end
-    if parallel
-        output = map_pairwise_parallel!(
-            fswap,output,box,cl;
-            output_threaded=output_threaded,
-            reduce=reduce,
-            show_progress=show_progress
-        )
-    else
-        output = map_pairwise_serial!(fswap,output,box,cl,show_progress=show_progress)
-    end
-    return output
-end
+# Core-computing infrastructure
+include("./core_computing/auxiliary_functions.jl")
+include("./core_computing/vicinal_cells.jl")
+include("./core_computing/self.jl")
+include("./core_computing/cross.jl")

# Utils
include("./neighborlists.jl")
diff --git a/src/CellLists.jl b/src/CellLists.jl
index 04387e6e..4d4406f3 100644
--- a/src/CellLists.jl
+++ b/src/CellLists.jl
@@ -1,5 +1,5 @@
#
-# This file contains all structre types and functions necessary for building
+# This file contains all structure types and functions necessary for building
# the CellList and CellListPair structures.
#

@@ -94,7 +94,6 @@ function copy_cell(cell::Cell{N,T}) where {N,T}
    )
end

-
#=

$(TYPEDEF)
@@ -148,6 +147,7 @@ Base.@kwdef mutable struct CellList{N,T}
    projected_particles::Vector{Vector{ProjectedParticle{N,T}}} =
        Vector{Vector{ProjectedParticle{N,T}}}(undef, 0)
end
+
function Base.show(io::IO, ::MIME"text/plain", cl::CellList)
    _println(io, typeof(cl))
    _println(io, " $(cl.n_real_particles) real particles.")
@@ -157,35 +157,26 @@ end

#=

-Structures to control dispatch on swapped vs. not swapped cell list pairs.
-
-=#
-struct Swapped end
-struct NotSwapped end
-
-#=
-
$(TYPEDEF)

# Extended help

$(TYPEDFIELDS)

-Structure that will cointain the cell lists of two independent sets of
+Structure that will contain the cell lists of two independent sets of
particles for cross-computation of interactions

=#
-struct CellListPair{V,N,T,Swap}
-    ref::V
-    target::CellList{N,T}
+struct CellListPair{N,T}
+    small_set::CellList{N,T}
+    large_set::CellList{N,T}
+    swap::Bool
end
-CellListPair(ref::V, target::CellList{N,T}, ::Swap) where {V,N,T,Swap} =
-    CellListPair{V,N,T,Swap}(ref, target)

function Base.show(io::IO, ::MIME"text/plain", cl::CellListPair)
    _print(io, typeof(cl), "\n")
-    _print(io, " $(length(cl.ref)) particles in the reference vector.\n")
-    _print(io, " $(cl.target.n_cells_with_real_particles) cells with real particles of target vector.")
+    _print(io, " $(cl.small_set.n_cells_with_real_particles) cells with real particles of the smallest set.\n")
+    _print(io, " $(cl.large_set.n_cells_with_real_particles) cells with real particles of the largest set.")
end

#=
@@ -204,7 +195,12 @@ function set_number_of_batches!(cl::CellList{N,T}, nbatches::Tuple{Int,Int}=(0,
        nbatches = NumberOfBatches(nbatches)
    else
        if nbatches != (0, 0) && nbatches != (1, 1)
-            println("WARNING: nbatches set to $nbatches, but parallel is set to false, implying nbatches == (1, 1)")
+            @warn begin
+                """\n
+                WARNING: nbatches set to $nbatches, but parallel is set to false, implying nbatches == (1, 1)
+
+                """
+            end _file=nothing _line=nothing
        end
        nbatches = NumberOfBatches((1, 1))
    end
@@ -225,43 +221,31 @@ function set_number_of_batches!(cl::CellList{N,T}, nbatches::Tuple{Int,Int}=(0,
    end
    return cl
end
+
# Heuristic choices for the number of batches, for an atomic system
_nbatches_build_cell_lists(n::Int) = max(1, min(n, min(8, nthreads())))
_nbatches_map_computation(n::Int) = max(1, min(n, min(floor(Int, 2^(log10(n) + 1)), nthreads())))

function set_number_of_batches!(
-    cl::CellListPair{V,N,T,Swap},
-    nbatches::Tuple{Int,Int}=(0, 0);
+    cl::CellListPair{N,T},
+    _nbatches::Tuple{Int,Int}=(0, 0);
    parallel=true
-) where {V,N,T,Swap}
-    if parallel
-        nbatches = NumberOfBatches(nbatches)
-    else
-        if nbatches != (0, 0) && nbatches != (1, 1)
-            println("WARNING: nbatches set to $nbatches, but parallel is set to false, implying nbatches == (1, 1)")
-        end
-        nbatches = NumberOfBatches((1, 1))
-    end
-    if nbatches.build_cell_lists < 1
-        n1 = _nbatches_build_cell_lists(cl.target.n_real_particles)
-    else
-        n1 = nbatches.build_cell_lists
-    end
-    if nbatches.map_computation < 1
-        n2 = _nbatches_map_computation(length(cl.ref))
-    else
-        n2 = nbatches.map_computation
-    end
-    cl.target.nbatches = NumberOfBatches(n1, n2)
-    return CellListPair{V,N,T,Swap}(cl.ref, cl.target)
+) where {N,T}
+    large_set = set_number_of_batches!(cl.large_set, _nbatches; parallel)
+    return CellListPair{N,T}(
+        set_number_of_batches!(cl.small_set, nbatches(large_set); parallel),
+        large_set,
+        cl.swap,
+    )
end

"""
-    nbatches(cl)
+    nbatches(cl::CellList)
+    nbatches(cl::CellListPair)

-Returns the number of batches for parallel processing that will be used in the pairwise function mappings associated to cell list `cl`.
-It returns the `cl.nbatches.map_computation` value. This function is important because it must be used to set the number of copies
-of custom preallocated output arrays.
+Returns the number of batches for parallel processing that will be used.
Returns a tuple, where +the first element is the number of batches for the construction of the cell lists, and the second +element is the number of batches for the pairwise mapping. A second argument can be provided, which may be `:map` or `:build`, in which case the function returns either the number of batches used for pairwise mapping or for the construction of the cell lists. Since this second value is internal and does not affect the interface, @@ -269,29 +253,51 @@ it can be usually ignored. ## Example -```julia-repl +```jldoctest +julia> using CellListMap + julia> x = rand(3,1000); box = Box([1,1,1],0.1); -julia> cl = CellList(x,box,nbatches=(2,16)); +julia> cl = CellList(x, box, nbatches=(2,16)); julia> nbatches(cl) -16 +(2, 16) + +julia> nbatches(cl,:build) +2 julia> nbatches(cl,:map) 16 -julia> nbatches(cl,:build) -2 ``` """ -nbatches(cl::CellList) = cl.nbatches.map_computation function nbatches(cl::CellList, s::Symbol) s == :map_computation || s == :map && return cl.nbatches.map_computation s == :build_cell_lists || s == :build && return cl.nbatches.build_cell_lists end -nbatches(cl::CellListPair) = nbatches(cl.target) -nbatches(cl::CellListPair, s::Symbol) = nbatches(cl.target, s) +nbatches(cl::CellList) = (nbatches(cl, :build), nbatches(cl, :map)) +nbatches(cl::CellListPair) = nbatches(cl.small_set) +nbatches(cl::CellListPair, s::Symbol) = nbatches(cl.small_set, s) + +@testitem "output of nbatches" begin + using CellListMap, StaticArrays + x = rand(3,100) + y = rand(3,10) + box = Box([1,1,1],0.1) + cl = CellList(x,box; nbatches=(2,4)) + @test nbatches(cl) == (2, 4) + @test nbatches(cl, :build) == 2 + @test nbatches(cl, :map) == 4 + cl = CellList(x,y,box; nbatches=(2,4)) + @test nbatches(cl) == (2, 4) + @test nbatches(cl, :build) == 2 + @test nbatches(cl, :map) == 4 + cl = CellList(x,y,box) + @test nbatches(cl.small_set) == nbatches(cl.large_set) + cl = CellList(x,box; nbatches=(2,4), parallel=false) + @test nbatches(cl) == (1,1) +end #= @@ -306,7 +312,6 @@ be considered by each thread on parallel construction. =# @with_kw struct AuxThreaded{N,T} - particles_per_batch::Int idxs::Vector{UnitRange{Int}} = Vector{UnitRange{Int}}(undef, 0) lists::Vector{CellList{N,T}} = Vector{CellList{N,T}}(undef, 0) end @@ -315,6 +320,15 @@ function Base.show(io::IO, ::MIME"text/plain", aux::AuxThreaded) _print(io, " Auxiliary arrays for nbatches = ", length(aux.lists)) end +@with_kw struct AuxThreadedPair{N,T} + small_set::AuxThreaded{N,T} + large_set::AuxThreaded{N,T} +end +function Base.show(io::IO, ::MIME"text/plain", aux::AuxThreadedPair) + _println(io, typeof(aux)) + _print(io, " Auxiliary arrays for nbatches = ", length(aux.small_set.lists)) +end + """ AuxThreaded(cl::CellList{N,T}) where {N,T} @@ -323,9 +337,11 @@ update of cell lists. 
## Example ```julia-repl +julia> using CellListMap + julia> box = Box([250,250,250],10); -julia> x = [ 250*rand(3) for _ in 1:100_000 ]; +julia> x = [ 250*rand(3) for i in 1:10000 ]; julia> cl = CellList(x,box); @@ -341,10 +357,9 @@ CellList{3, Float64} ``` """ -function AuxThreaded(cl::CellList{N,T}; particles_per_batch=10_000) where {N,T} +function AuxThreaded(cl::CellList{N,T}) where {N,T} nbatches = cl.nbatches.build_cell_lists aux = AuxThreaded{N,T}( - particles_per_batch=particles_per_batch, idxs=Vector{UnitRange{Int}}(undef, nbatches), lists=Vector{CellList{N,T}}(undef, nbatches) ) @@ -352,10 +367,7 @@ function AuxThreaded(cl::CellList{N,T}; particles_per_batch=10_000) where {N,T} nbatches == 1 && return aux @sync for ibatch in eachindex(aux.lists) @spawn begin - cl_batch = CellList{N,T}( - n_real_particles=particles_per_batch, # this is reset before filling, in UpdateCellList! - number_of_cells=cl.number_of_cells, - ) + cl_batch = CellList{N,T}(number_of_cells=cl.number_of_cells) aux.lists[ibatch] = cl_batch end end @@ -378,7 +390,12 @@ corresponding `AuxThreaded` structure. =# function set_idxs!(idxs, n_particles, nbatches) - length(idxs) == nbatches || throw(ArgumentError("Modifying `nbatches` requires an explicit update of the AuxThreaded auxiliary array.")) + if length(idxs) != nbatches + throw(ArgumentError("""\n + Modifying `nbatches` requires an explicit update of the AuxThreaded auxiliary array. + + """)) + end nperthread, nrem = divrem(n_particles, nbatches) first = 1 for ibatch in eachindex(idxs) @@ -400,6 +417,8 @@ to be passed to `UpdateCellList!` for in-place update of cell lists. ## Example ```julia-repl +julia> using CellListMap + julia> box = Box([250,250,250],10); julia> x = [ 250*rand(3) for i in 1:50_000 ]; @@ -412,7 +431,7 @@ julia> aux = CellListMap.AuxThreaded(cl) CellListMap.AuxThreaded{3, Float64} Auxiliary arrays for nthreads = 8 -julia> UpdateCellList!(x,box,cl,aux) +julia> UpdateCellList!(x,y,box,cl,aux) CellList{3, Float64} 100000 real particles. 31190 cells with real particles. @@ -420,8 +439,8 @@ CellList{3, Float64} ``` """ -AuxThreaded(cl_pair::CellListPair; particles_per_batch=10_000) = - AuxThreaded(cl_pair.target, particles_per_batch=particles_per_batch) +AuxThreaded(cl_pair::CellListPair) = + AuxThreadedPair(AuxThreaded(cl_pair.small_set), AuxThreaded(cl_pair.large_set)) """ CellList( @@ -514,14 +533,11 @@ end box::Box{UnitCellType,N,T}; parallel::Bool=true, nbatches::Tuple{Int,Int}=(0,0), - autoswap::Bool=true, validate_coordinates::Union{Function,Nothing}=_validate_coordinates ) where {UnitCellType,N,T} Function that will initialize a `CellListPair` structure from scratch, given two vectors of particle coordinates and a `Box`, which contain the size of the system, cutoff, etc. -By default, the cell list will be constructed for smallest vector, but this is not always -the optimal choice. Using `autoswap=false` the cell list is constructed for the second (`y`) ## Example @@ -537,11 +553,6 @@ CellListMap.CellListPair{Vector{SVector{3, Float64}}, 3, Float64} 10000 particles in the reference vector. 961 cells with real particles of target vector. -julia> cl = CellList(x,y,box,autoswap=false) -CellListMap.CellListPair{Vector{SVector{3, Float64}}, 3, Float64} - 1000 particles in the reference vector. - 7389 cells with real particles of target vector. 
- ``` """ @@ -551,21 +562,14 @@ function CellList( box::Box{UnitCellType,N,T}; parallel::Bool=true, nbatches::Tuple{Int,Int}=(0, 0), - autoswap=true, validate_coordinates::Union{Function,Nothing}=_validate_coordinates, ) where {UnitCellType,N,T} - if !autoswap || length(x) >= length(y) - isnothing(validate_coordinates) || validate_coordinates(x) - ref = [SVector{N,T}(ntuple(i -> el[i], Val(N))) for el in x] - target = CellList(y, box; parallel, validate_coordinates) - swap = NotSwapped() - else - isnothing(validate_coordinates) || validate_coordinates(y) - ref = [SVector{N,T}(ntuple(i -> el[i], Val(N))) for el in y] - target = CellList(x, box; parallel, validate_coordinates) - swap = Swapped() - end - cl_pair = CellListPair(ref, target, swap) + xsmall, xlarge, swap = length(x) <= length(y) ? (x, y, false) : (y, x, true) + isnothing(validate_coordinates) || validate_coordinates(x) + isnothing(validate_coordinates) || validate_coordinates(y) + small_set = CellList(xsmall, box; parallel, validate_coordinates) + large_set = CellList(xlarge, box; parallel, validate_coordinates) + cl_pair = CellListPair{N,T}(small_set, large_set, swap) cl_pair = set_number_of_batches!(cl_pair, nbatches; parallel) return cl_pair end @@ -742,7 +746,10 @@ function UpdateCellList!( if length(x) > 0 && (length(x[begin]) != size(box.input_unit_cell.matrix, 1)) n1 = length(x[begin]) n2 = size(box.input_unit_cell.matrix, 1) - throw(DimensionMismatch("Positions have dimension $n1, but the unit cell has dimension $n2.")) + throw(DimensionMismatch("""\n + Positions have dimension $n1, but the unit cell has dimension $n2. + + """)) end # Add particles to cell list @@ -899,7 +906,11 @@ merge cell lists computed in parallel threads. function merge_cell_lists!(cl::CellList, aux::CellList) # One should never get here if the lists do not share the same # computing box if cl.number_of_cells != aux.number_of_cells - error("cell lists must have the same number of cells to be merged.") + throw(ArgumentError("""\n + Cell lists must have the same number of cells to be merged. + Got inconsistent number of cells: $(cl.number_of_cells) and $(aux.number_of_cells). + + """)) end cl.n_particles += aux.n_particles cl.n_real_particles += aux.n_real_particles @@ -1060,7 +1071,7 @@ end ) Function that will update a previously allocated `CellListPair` structure, given -new updated particle positions, for example. This method will allocate new +updated particle positions, for example. This method will allocate new `aux` threaded auxiliary arrays. For a non-allocating version, see the `UpdateCellList!(x,y,box,cl,aux)` method. @@ -1069,7 +1080,9 @@ should throw an error if the coordinates are invalid. By default, this function throws an error if some coordinates are missing or are NaN. Set to `nothing` to disable this check, or provide a custom function. -```julia-repl +```jlcodetest +julia> using CellListMap, StaticArrays + julia> box = Box([250,250,250],10); julia> x = [ 250*rand(SVector{3,Float64}) for i in 1:1000 ]; @@ -1079,7 +1092,6 @@ julia> y = [ 250*rand(SVector{3,Float64}) for i in 1:10000 ]; julia> cl = CellList(x,y,box); julia> UpdateCellList!(x,y,box,cl); # update lists - ``` """ @@ -1146,6 +1158,8 @@ lists. ## Example ```julia-repl +julia> using CellListMap + julia> box = Box([250,250,250],10); julia> x = [ 250*rand(3) for i in 1:50_000 ]; @@ -1172,8 +1186,8 @@ CellList{3, Float64} 12591 particles in computing box, including images. 
```
-To illustrate the expected ammount of allocations, which are a consequence
-of thread spawning only:
+
+The allocations in the above calls are a consequence of thread spawning only:

```julia-repl
julia> using BenchmarkTools
@@ -1197,46 +1211,17 @@ function UpdateCellList!(
    x::AbstractVector{<:AbstractVector},
    y::AbstractVector{<:AbstractVector},
    box::Box,
-    cl_pair::CellListPair{V,N,T,Swap},
-    aux::Union{Nothing,AuxThreaded};
+    cl_pair::CellListPair{N,T},
+    aux::Union{Nothing,AuxThreadedPair};
    parallel::Bool=true,
    validate_coordinates::Union{Nothing,Function}=_validate_coordinates,
-) where {V,N,T,Swap<:NotSwapped}
-    ref = x
+) where {N,T}
    isnothing(validate_coordinates) || validate_coordinates(x)
-    target = UpdateCellList!(y, box, cl_pair.target, aux; parallel, validate_coordinates)
-    cl_pair = _update_CellListPair!(ref, target, cl_pair)
-    return cl_pair
-end
-# Swapped vectors version
-function UpdateCellList!(
-    x::AbstractVector{<:AbstractVector},
-    y::AbstractVector{<:AbstractVector},
-    box::Box,
-    cl_pair::CellListPair{V,N,T,Swap},
-    aux::Union{Nothing,AuxThreaded};
-    parallel::Bool=true,
-    validate_coordinates::Union{Nothing,Function}=_validate_coordinates,
-) where {V,N,T,Swap<:Swapped}
-    ref = y
    isnothing(validate_coordinates) || validate_coordinates(y)
-    target = UpdateCellList!(x, box, cl_pair.target, aux; parallel, validate_coordinates)
-    cl_pair = _update_CellListPair!(ref, target, cl_pair)
-    return cl_pair
-end
-
-# Function barrier that was required to avoid the `Swap` type to cause some instability
-function _update_CellListPair!(ref, target, cl_pair::CellListPair{V,N,T,Swap}) where {V,N,T,Swap}
-    # This resizing will fail if the data was input as a (N,M) matrix, because resizing
-    # is not implemented for reinterpreted arrays.
-    if length(ref) != length(cl_pair.ref)
-        resize!(cl_pair.ref, length(ref))
-    end
-    for i in eachindex(ref, cl_pair.ref)
-        @inbounds cl_pair.ref[i] = SVector{N,T}(ntuple(j -> ref[i][j], Val(N)))
-    end
-    cl_pair = CellListPair{V,N,T,Swap}(cl_pair.ref, target)
-    return cl_pair
+    xsmall, xlarge, swap = length(x) <= length(y) ? (x, y, false) : (y, x, true)
+    small_set = UpdateCellList!(xsmall, box, cl_pair.small_set, aux.small_set; parallel, validate_coordinates)
+    large_set = UpdateCellList!(xlarge, box, cl_pair.large_set, aux.large_set; parallel, validate_coordinates)
+    return CellListPair{N,T}(small_set, large_set, swap)
end

#=
@@ -1259,7 +1244,7 @@ function UpdateCellList!(
    y::AbstractMatrix,
    box::Box{UnitCellType,N},
    cl_pair::CellListPair,
-    aux::Union{Nothing,AuxThreaded};
+    aux::Union{Nothing,AuxThreadedPair};
    kargs...
) where {UnitCellType,N}
    size(x, 1) == N || throw(DimensionMismatch("First dimension of input matrix must be $N"))
@@ -1276,7 +1261,7 @@ Returns the average number of real particles per computing cell.

=#
particles_per_cell(cl::CellList) = cl.n_real_particles / cl.number_of_cells
-particles_per_cell(cl::CellListPair) = particles_per_cell(cl.target)
+particles_per_cell(cl::CellListPair) = particles_per_cell(cl.small_set) + particles_per_cell(cl.large_set)

@testitem "celllists - validate coordinates" begin
    using CellListMap
diff --git a/src/CoreComputing.jl b/src/CoreComputing.jl
deleted file mode 100644
index 70a0ef98..00000000
--- a/src/CoreComputing.jl
+++ /dev/null
@@ -1,392 +0,0 @@
-#=
-    reduce(output, output_threaded)
-
-Most common reduction function, which sums the elements of the output.
-Here, `output_threaded` is a vector containing `nbatches(cl)` copies of
-the `output` variable (a scalar or an array).
Custom reduction functions -must replace this one if the reduction operation is not a simple sum. -The `output_threaded` array is, by default, created automatically by copying -the given `output` variable `nbatches(cl)` times. - -## Examples - -Scalar reduction: - -```julia-repl -julia> output = 0.; output_threaded = [ 1, 2 ]; - -julia> CellListMap.reduce(output,output_threaded) -3 -``` - -Array reduction: - -```julia-repl -julia> output = [0,0]; output_threaded = [ [1,1], [2,2] ]; - -julia> CellListMap.reduce(output,output_threaded) -2-element Vector{Int64}: - 3 - 3 - -julia> output -2-element Vector{Int64}: - 3 - 3 -``` - -=# -reduce(output::T, output_threaded::Vector{T}) where {T} = sum(output_threaded) -function reduce(output::T, output_threaded::Vector{T}) where {T<:AbstractArray} - for ibatch in eachindex(output_threaded) - @. output += output_threaded[ibatch] - end - return output -end -function reduce(output, output_threaded) - T = typeof(output) - throw(ArgumentError("""\n - No method matching reduce(::$(typeof(output)),::$(typeof(output_threaded))) - - Please provide a method that appropriately reduces a `Vector{$T}`, with - the signature: - - ``` - custom_reduce(output::$T, output_threaded::Vector{$T}) - ``` - - The reduction function **must** return the `output` variable, even - if it is mutable. - - See: https://m3g.github.io/CellListMap.jl/stable/parallelization/#Custom-reduction-functions - - """)) -end - -#= - partition!(by, x::AbstractVector) - -# Extended help - -Function that reorders `x` vector by putting in the first positions the -elements with values satisfying `by(el)`. Returns the number of elements -that satisfy the condition. - -=# -function partition!(by, x::AbstractVector) - iswap = 1 - @inbounds for i in eachindex(x) - if by(x[i]) - if iswap != i - x[iswap], x[i] = x[i], x[iswap] - end - iswap += 1 - end - end - return iswap - 1 -end - -# -# Auxiliary functions to control the exibition of the progress meter -# -_next!(::Nothing) = nothing -_next!(p) = ProgressMeter.next!(p) - -# -# Serial version for self-pairwise computations -# -function map_pairwise_serial!( - f::F, output, box::Box, cl::CellList{N,T}; - show_progress::Bool=false -) where {F,N,T} - @unpack n_cells_with_real_particles = cl - p = show_progress ? Progress(n_cells_with_real_particles, dt=1) : nothing - ibatch = 1 - for i in 1:n_cells_with_real_particles - cellᵢ = cl.cells[cl.cell_indices_real[i]] - output = inner_loop!(f, box, cellᵢ, cl, output, ibatch) - _next!(p) - end - return output -end - -# -# Parallel version for self-pairwise computations -# -function batch(f::F, ibatch, cell_indices, output_threaded, box, cl, p) where {F} - for i in cell_indices - cellᵢ = cl.cells[cl.cell_indices_real[i]] - output_threaded[ibatch] = inner_loop!(f, box, cellᵢ, cl, output_threaded[ibatch], ibatch) - _next!(p) - end -end - -# Note: the reason why in the next loop @spawn if followed by interpolated variables -# is to avoid allocations caused by the capturing of variables by the closures created -# by the macro. This may not be needed in the future, if the corresponding issue is solved. 
-# See: https://discourse.julialang.org/t/type-instability-because-of-threads-boxing-variables/78395 -function map_pairwise_parallel!( - f::F1, output, box::Box, cl::CellList{N,T}; - output_threaded=nothing, - reduce::F2=reduce, - show_progress::Bool=false -) where {F1,F2,N,T} - nbatches = cl.nbatches.map_computation - if isnothing(output_threaded) - output_threaded = [deepcopy(output) for i in 1:nbatches] - end - @unpack n_cells_with_real_particles = cl - nbatches = cl.nbatches.map_computation - p = show_progress ? Progress(n_cells_with_real_particles, dt=1) : nothing - @sync for (ibatch, cell_indices) in enumerate(index_chunks(1:n_cells_with_real_particles; n=nbatches, split=RoundRobin())) - @spawn batch($f, $ibatch, $cell_indices, $output_threaded, $box, $cl, $p) - end - return reduce(output, output_threaded) -end - -# -# Serial version for cross-interaction computations -# -function map_pairwise_serial!( - f::F, output, box::Box, - cl::CellListPair{N,T}; - show_progress::Bool=false -) where {F,N,T} - p = show_progress ? Progress(length(cl.ref), dt=1) : nothing - for i in eachindex(cl.ref) - output = inner_loop!(f, output, i, box, cl) - _next!(p) - end - return output -end - -# -# Parallel version for cross-interaction computations -# -function batch_cross(f::F, ibatch, ref_atom_indices, output_threaded, box, cl, p) where {F} - for i in ref_atom_indices - output_threaded[ibatch] = inner_loop!(f, output_threaded[ibatch], i, box, cl) - _next!(p) - end -end - -function map_pairwise_parallel!( - f::F1, output, box::Box, - cl::CellListPair{N,T}; - output_threaded=nothing, - reduce::F2=reduce, - show_progress::Bool=false -) where {F1,F2,N,T} - nbatches = cl.target.nbatches.map_computation - if isnothing(output_threaded) - output_threaded = [deepcopy(output) for i in 1:nbatches] - end - p = show_progress ? Progress(length(cl.ref), dt=1) : nothing - @sync for (ibatch, ref_atom_indices) in enumerate(index_chunks(1:length(cl.ref); n=nbatches, split=Consecutive())) - @spawn batch_cross($f, $ibatch, $ref_atom_indices, $output_threaded, $box, $cl, $p) - end - return reduce(output, output_threaded) -end - -# -# Inner loop for Orthorhombic cells is faster because we can guarantee that -# there are not repeated computations even if running over half of the cells. -# -inner_loop!(f::F, box::Box{<:OrthorhombicCellType}, cellᵢ, cl::CellList, output, ibatch) where {F<:Function} = - inner_loop!(f, neighbor_cells_forward, box, cellᵢ, cl, output, ibatch) -inner_loop!(f::F, box::Box{<:TriclinicCell}, cellᵢ, cl::CellList, output, ibatch) where {F<:Function} = - inner_loop!(f, neighbor_cells, box, cellᵢ, cl, output, ibatch) - -function inner_loop!( - f::F, - _neighbor_cells::NC, # depends on cell type - box, - cellᵢ, - cl::CellList{N,T}, - output, - ibatch -) where {F<:Function,NC<:Function,N,T} - @unpack cutoff_sqr, inv_rotation, nc = box - output = _current_cell_interactions!(box, f, cellᵢ, output) - for jcell in _neighbor_cells(box) - jc_linear = cell_linear_index(nc, cellᵢ.cartesian_index + jcell) - if cl.cell_indices[jc_linear] != 0 - cellⱼ = cl.cells[cl.cell_indices[jc_linear]] - output = _vicinal_cell_interactions!(f, box, cellᵢ, cellⱼ, cl, output, ibatch) - end - end - return output -end - -function _current_cell_interactions!(box::Box{<:OrthorhombicCellType}, f::F, cell, output) where {F<:Function} - @unpack cutoff_sqr, inv_rotation = box - # loop over list of non-repeated particles of cell ic. 
All particles are real - for i in 1:cell.n_particles-1 - @inbounds pᵢ = cell.particles[i] - xpᵢ = pᵢ.coordinates - for j in i+1:cell.n_particles - @inbounds pⱼ = cell.particles[j] - xpⱼ = pⱼ.coordinates - d2 = norm_sqr(xpᵢ - xpⱼ) - if d2 <= cutoff_sqr - output = f(inv_rotation * xpᵢ, inv_rotation * xpⱼ, pᵢ.index, pⱼ.index, d2, output) - end - end - end - return output -end - -function _current_cell_interactions!(box::Box{TriclinicCell}, f::F, cell, output) where {F<:Function} - @unpack cutoff_sqr, inv_rotation = box - # loop over all pairs, skip when i >= j, skip if neither particle is real - for i in 1:cell.n_particles - @inbounds pᵢ = cell.particles[i] - xpᵢ = pᵢ.coordinates - pᵢ.real || continue - for j in 1:cell.n_particles - @inbounds pⱼ = cell.particles[j] - (pᵢ.index >= pⱼ.index) && continue - xpⱼ = pⱼ.coordinates - d2 = norm_sqr(xpᵢ - xpⱼ) - if d2 <= cutoff_sqr - output = f(inv_rotation * xpᵢ, inv_rotation * xpⱼ, pᵢ.index, pⱼ.index, d2, output) - end - end - end - return output -end - -# -# Compute interactions between vicinal cells -# -function _vicinal_cell_interactions!(f::F, box::Box, cellᵢ, cellⱼ, cl::CellList{N,T}, output, ibatch) where {F<:Function,N,T} - # project particles in vector connecting cell centers - Δc = cellⱼ.center - cellᵢ.center - Δc_norm = norm(Δc) - Δc = Δc / Δc_norm - pp = project_particles!(cl.projected_particles[ibatch], cellⱼ, cellᵢ, Δc, Δc_norm, box) - if length(pp) > 0 - output = _vinicial_cells!(f, box, cellᵢ, pp, Δc, output) - end - return output -end - -# -# The criteria form skipping computations is different then in Orthorhombic or Triclinic boxes -# -function _vinicial_cells!(f::F, box::Box{<:OrthorhombicCellType}, cellᵢ, pp, Δc, output) where {F<:Function} - @unpack cutoff, cutoff_sqr, inv_rotation = box - # Loop over particles of cell icell - for i in 1:cellᵢ.n_particles - @inbounds pᵢ = cellᵢ.particles[i] - # project particle in vector connecting cell centers - xpᵢ = pᵢ.coordinates - xproj = dot(xpᵢ - cellᵢ.center, Δc) - # Partition pp array according to the current projections - n = partition!(el -> abs(el.xproj - xproj) <= cutoff, pp) - # Compute the interactions - for j in 1:n - @inbounds pⱼ = pp[j] - xpⱼ = pⱼ.coordinates - d2 = norm_sqr(xpᵢ - xpⱼ) - if d2 <= cutoff_sqr - output = f(inv_rotation * xpᵢ, inv_rotation * xpⱼ, pᵢ.index, pⱼ.index, d2, output) - end - end - end - return output -end - -function _vinicial_cells!(f::F, box::Box{<:TriclinicCell}, cellᵢ, pp, Δc, output) where {F<:Function} - @unpack cutoff, cutoff_sqr, inv_rotation = box - # Loop over particles of cell icell - for i in 1:cellᵢ.n_particles - @inbounds pᵢ = cellᵢ.particles[i] - # project particle in vector connecting cell centers - xpᵢ = pᵢ.coordinates - xproj = dot(xpᵢ - cellᵢ.center, Δc) - # Partition pp array according to the current projections - n = partition!(el -> abs(el.xproj - xproj) <= cutoff, pp) - # Compute the interactions - pᵢ.real || continue - for j in 1:n - @inbounds pⱼ = pp[j] - pᵢ.index >= pⱼ.index && continue - xpⱼ = pⱼ.coordinates - d2 = norm_sqr(xpᵢ - xpⱼ) - if d2 <= cutoff_sqr - output = f(inv_rotation * xpᵢ, inv_rotation * xpⱼ, pᵢ.index, pⱼ.index, d2, output) - end - end - end - return output -end - -#= - project_particles!(projected_particles,cellⱼ,cellᵢ,Δc,Δc_norm,box) - -# Extended help - -Projects all particles of the cell `cellⱼ` into unnitary vector `Δc` with direction -connecting the centers of `cellⱼ` and `cellᵢ`. 
Modifies `projected_particles`, and -returns a view of `projected particles, where only the particles for which -the projection on the direction of the cell centers still allows the particle -to be within the cutoff distance of any point of the other cell. - -=# -function project_particles!( - projected_particles, cellⱼ, cellᵢ, - Δc, Δc_norm, box::Box{UnitCellType,N} -) where {UnitCellType,N} - if box.lcell == 1 - margin = box.cutoff + Δc_norm / 2 # half of the distance between centers - else - margin = box.cutoff * (1 + sqrt(N) / 2) # half of the diagonal of the cutoff-box - end - iproj = 0 - @inbounds for j in 1:cellⱼ.n_particles - pⱼ = cellⱼ.particles[j] - xproj = dot(pⱼ.coordinates - cellᵢ.center, Δc) - if abs(xproj) <= margin - iproj += 1 - projected_particles[iproj] = ProjectedParticle(pⱼ.index, xproj, pⱼ.coordinates) - end - end - pp = @view(projected_particles[1:iproj]) - return pp -end - -# -# Inner loop of cross-interaction computations. If the two sets -# are large, this might(?) be improved by computing two separate -# cell lists and using projection and partitioning. -# -function inner_loop!( - f, output, i, box, - cl::CellListPair{N,T} -) where {N,T} - @unpack nc, cutoff_sqr, inv_rotation, rotation = box - xpᵢ = box.rotation * wrap_to_first(cl.ref[i], box.input_unit_cell.matrix) - ic = particle_cell(xpᵢ, box) - for neighbor_cell in current_and_neighbor_cells(box) - jc_cartesian = neighbor_cell + ic - jc_linear = cell_linear_index(nc, jc_cartesian) - # If cellⱼ is empty, cycle - if cl.target.cell_indices[jc_linear] == 0 - continue - end - cellⱼ = cl.target.cells[cl.target.cell_indices[jc_linear]] - # loop over particles of cellⱼ - for j in 1:cellⱼ.n_particles - @inbounds pⱼ = cellⱼ.particles[j] - xpⱼ = pⱼ.coordinates - d2 = norm_sqr(xpᵢ - xpⱼ) - if d2 <= cutoff_sqr - output = f(inv_rotation * xpᵢ, inv_rotation * xpⱼ, i, pⱼ.index, d2, output) - end - end - end - return output -end - - diff --git a/src/ParticleSystem.jl b/src/ParticleSystem.jl index 46530ca9..58ff037f 100644 --- a/src/ParticleSystem.jl +++ b/src/ParticleSystem.jl @@ -24,7 +24,6 @@ const SupportedCoordinatesTypes = Union{Nothing,AbstractVector{<:AbstractVector} output_name::Symbol, parallel::Bool=true, nbatches::Tuple{Int,Int}=(0, 0), - autoswap::Bool = true, validate_coordinates::Union{Nothing,Function}=_validate_coordinates ) @@ -56,9 +55,7 @@ structure using the `system.forces` notation. The `parallel` and `nbatches` flags control the parallelization scheme of computations (see https://m3g.github.io/CellListMap.jl/stable/parallelization/#Number-of-batches)). By default the parallelization is turned on and `nbatches` is set with heuristics -that may provide good efficiency in most cases. `autoswap = false` will guarantee that -the cell lists will be buitl for the `ypositions` (by default they are constructed -for the smallest set, which is faster). +that may provide good efficiency in most cases. The `validate_coordinates` function can be used to validate the coordinates before the construction of the system. If `nothing`, no validation is performed. 
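To illustrate the `validate_coordinates` hook described above, a minimal sketch of a custom validator (the `check_finite` helper is hypothetical; the default validator rejects missing and `NaN` coordinates):

```julia
# Hypothetical custom validator: throw if any coordinate has a non-finite entry.
function check_finite(x)
    for (i, v) in enumerate(x)
        all(isfinite, v) || throw(ArgumentError("non-finite coordinate at index $i: $v"))
    end
    return nothing
end
# Passed to the constructor as, for example:
# sys = ParticleSystem(;
#     xpositions = rand(SVector{3,Float64}, 100),
#     unitcell = [1.0, 1.0, 1.0], cutoff = 0.1, output = 0.0,
#     validate_coordinates = check_finite,
# )
```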
@@ -74,9 +71,9 @@ the particles that are within the cutoff: ```jldoctest ;filter = r"(\\d*)\\.(\\d)\\d+" => s"\\1.\\2" julia> using CellListMap -julia> using PDBTools: readPDB, coor +julia> using PDBTools: read_pdb, coor -julia> positions = coor(readPDB(CellListMap.argon_pdb_file)); +julia> positions = coor(read_pdb(CellListMap.argon_pdb_file)); julia> sys = ParticleSystem( xpositions = positions, @@ -93,9 +90,9 @@ julia> map_pairwise!((x,y,i,j,d2,output) -> output += d2, sys) ```jldoctest ;filter = r"(\\d*)\\.(\\d{2})\\d+" => s"\\1.\\2" julia> using CellListMap, PDBTools -julia> xpositions = coor(readPDB(CellListMap.argon_pdb_file))[1:50]; +julia> xpositions = coor(read_pdb(CellListMap.argon_pdb_file))[1:50]; -julia> ypositions = coor(readPDB(CellListMap.argon_pdb_file))[51:100]; +julia> ypositions = coor(read_pdb(CellListMap.argon_pdb_file))[51:100]; julia> sys = ParticleSystem( xpositions = xpositions, @@ -121,7 +118,6 @@ function ParticleSystem(; parallel::Bool=true, nbatches::Tuple{Int,Int}=(0, 0), lcell=1, - autoswap::Bool=true, validate_coordinates::Union{Nothing,Function}=_validate_coordinates, ) # Set xpositions if positions was set @@ -161,16 +157,16 @@ function ParticleSystem(; _box = CellListMap.Box(unitcell, cutoff, lcell=lcell) _cell_list = CellListMap.CellList(xpositions, _box; parallel, nbatches, validate_coordinates) _aux = CellListMap.AuxThreaded(_cell_list) - _output_threaded = [copy_output(output) for _ in 1:CellListMap.nbatches(_cell_list)] + _output_threaded = [copy_output(output) for _ in 1:CellListMap.nbatches(_cell_list, :map)] output = _reset_all_output!(output, _output_threaded) sys = ParticleSystem1{output_name}(xpositions, output, _box, _cell_list, _output_threaded, _aux, parallel, validate_coordinates) # Two sets of positions else unitcell = isnothing(unitcell) ? 
limits(xpositions, ypositions; validate_coordinates) : unitcell _box = CellListMap.Box(unitcell, cutoff, lcell=lcell) - _cell_list = CellListMap.CellList(xpositions, ypositions, _box; parallel, nbatches, autoswap, validate_coordinates) + _cell_list = CellListMap.CellList(xpositions, ypositions, _box; parallel, nbatches, validate_coordinates) _aux = CellListMap.AuxThreaded(_cell_list) - _output_threaded = [copy_output(output) for _ in 1:CellListMap.nbatches(_cell_list)] + _output_threaded = [copy_output(output) for _ in 1:CellListMap.nbatches(_cell_list, :map)] output = _reset_all_output!(output, _output_threaded) sys = ParticleSystem2{output_name}(xpositions, ypositions, output, _box, _cell_list, _output_threaded, _aux, parallel, validate_coordinates) end @@ -392,8 +388,7 @@ function Base.show(io::IO, mime::MIME"text/plain", sys::ParticleSystem1{OutputNa show(IOContext(io, :indent => indent + 4), mime, sys._box) println(io) show(io_sub, mime, sys._cell_list) - println(io, "\n Parallelization auxiliary data set for: ") - show(io_sub, mime, sys._cell_list.nbatches) + print(io, "\n Parallelization auxiliary data set for $(nbatches(sys._cell_list, :build)) batch(es).") print(io, "\n Type of output variable ($OutputName): $(typeof(sys.output))") end @@ -405,8 +400,7 @@ function Base.show(io::IO, mime::MIME"text/plain", sys::ParticleSystem2{OutputNa show(IOContext(io, :indent => indent + 4), mime, sys._box) println(io) show(io_sub, mime, sys._cell_list) - println(io, "\n Parallelization auxiliary data set for: ") - show(io_sub, mime, sys._cell_list.target.nbatches) + print(io, "\n Parallelization auxiliary data set for $(nbatches(sys._cell_list, :build)) batch(es).") print(io, "\n Type of output variable ($OutputName): $(typeof(sys.output))") end @@ -573,7 +567,7 @@ in a simulation box. ```jldoctest ;filter = r"(\\d*)\\.(\\d{4})\\d+" => s"\\1.\\2" julia> using CellListMap, PDBTools -julia> positions = coor(readPDB(CellListMap.argon_pdb_file)); +julia> positions = coor(read_pdb(CellListMap.argon_pdb_file)); julia> struct MinimumDistance d::Float64 end # Custom output type @@ -607,6 +601,7 @@ function reducer!(x, y) with the appropriate way to combine two instances of the type (summing, keeping the minimum, etc), such that threaded computations can be reduced. + """)) end reducer!(x::T, y::T) where {T<:SupportedTypes} = +(x, y) @@ -718,7 +713,7 @@ where the size of the simulation box changes during the simulation. ```jldoctest ;filter = r"batches.*" => "" julia> using CellListMap, StaticArrays, PDBTools -julia> xpositions = coor(readPDB(CellListMap.argon_pdb_file)); +julia> xpositions = coor(read_pdb(CellListMap.argon_pdb_file)); julia> sys = ParticleSystem( xpositions = xpositions, @@ -739,9 +734,7 @@ ParticleSystem1{output} of dimension 3, composed of: 100 real particles. 8 cells with real particles. 800 particles in computing box, including images. - Parallelization auxiliary data set for: - Number of batches for cell list construction: 1 - Number of batches for function mapping: 1 + Parallelization auxiliary data set for 2 batch(es). Type of output variable (output): Float64 ``` @@ -751,6 +744,7 @@ function update_unitcell!(sys, unitcell) if unitcelltype(sys) == NonPeriodicCell throw(ArgumentError("""\n Manual updating of the unit cell of non-periodic systems is not allowed. + """)) end sys._box = update_box(sys._box; unitcell) @@ -796,7 +790,7 @@ the cutoff to `10.0`. 
```jldoctest ; filter = r"batches.*" => "" julia> using CellListMap, PDBTools -julia> x = coor(readPDB(CellListMap.argon_pdb_file)); +julia> x = coor(read_pdb(CellListMap.argon_pdb_file)); julia> sys = ParticleSystem( xpositions = x, @@ -817,9 +811,7 @@ ParticleSystem1{output} of dimension 3, composed of: 100 real particles. 8 cells with real particles. 800 particles in computing box, including images. - Parallelization auxiliary data set for: - Number of batches for cell list construction: 8 - Number of batches for function mapping: 8 + Parallelization auxiliary data set for 2 batch(es). Type of output variable (output): Float64 ``` """ @@ -857,7 +849,7 @@ end @test a == 0 # Update cutoff of non-periodic systems - x = coor(readPDB(CellListMap.argon_pdb_file)) + x = coor(read_pdb(CellListMap.argon_pdb_file)) sys1 = ParticleSystem(xpositions=x, cutoff=8.0, output=0.0) @test unitcelltype(sys1) == NonPeriodicCell @test sys1.unitcell ≈ [35.63 0.0 0.0; 0.0 35.76 0.0; 0.0 0.0 35.79] atol = 1e-2 @@ -916,14 +908,6 @@ function UpdateParticleSystem!(sys::ParticleSystem1, update_lists::Bool=true) return sys end -function _update_ref_positions!(cl::CellListPair{V,N,T,Swap}, sys) where {V,N,T,Swap<:NotSwapped} - isnothing(sys.validate_coordinates) || sys.validate_coordinates(sys.xpositions) - resize!(cl.ref, length(sys.xpositions)) - cl.ref .= sys.xpositions -end -function _update_ref_positions!(::CellListPair{V,N,T,Swap}, sys) where {V,N,T,Swap<:Swapped} - throw(ArgumentError("update_lists == false requires autoswap == false for 2-set systems.")) -end function UpdateParticleSystem!(sys::ParticleSystem2, update_lists::Bool=true) if update_lists if unitcelltype(sys) == NonPeriodicCell @@ -938,9 +922,6 @@ function UpdateParticleSystem!(sys::ParticleSystem2, update_lists::Bool=true) parallel=sys.parallel, validate_coordinates=sys.validate_coordinates, ) - else - # Always update the reference set positions (the cell lists of the target set are not updated) - _update_ref_positions!(sys._cell_list, sys) end return sys end @@ -985,17 +966,6 @@ end a = @ballocated CellListMap.UpdateParticleSystem!($sys) samples = 1 evals = 1 @test a == 0 - # Throw error when trying to *not* update lists with autoswap on: - sys = ParticleSystem( - xpositions=rand(SVector{2,Float64}, 100), - ypositions=rand(SVector{2,Float64}, 200), - unitcell=[1, 1], - cutoff=0.1, - output=0.0, - autoswap=true, - ) - @test_throws ArgumentError map_pairwise!((_, _, _, _, d2, u) -> u += d2, sys, update_lists=false) - end """ diff --git a/src/core_computing/auxiliary_functions.jl b/src/core_computing/auxiliary_functions.jl new file mode 100644 index 00000000..734ff716 --- /dev/null +++ b/src/core_computing/auxiliary_functions.jl @@ -0,0 +1,131 @@ +#= + reduce(output, output_threaded) + +Most common reduction function, which sums the elements of the output. +Here, `output_threaded` is a vector containing `nbatches(cl)` copies of +the `output` variable (a scalar or an array). Custom reduction functions +must replace this one if the reduction operation is not a simple sum. +The `output_threaded` array is, by default, created automatically by copying +the given `output` variable `nbatches(cl)` times. 
+
+## Examples
+
+Scalar reduction:
+
+```julia-repl
+julia> output = 0.; output_threaded = [ 1, 2 ];
+
+julia> CellListMap.reduce(output,output_threaded)
+3
+```
+
+Array reduction:
+
+```julia-repl
+julia> output = [0,0]; output_threaded = [ [1,1], [2,2] ];
+
+julia> CellListMap.reduce(output,output_threaded)
+2-element Vector{Int64}:
+ 3
+ 3
+
+julia> output
+2-element Vector{Int64}:
+ 3
+ 3
+```
+
+=#
+reduce(output::T, output_threaded::Vector{T}) where {T} = sum(output_threaded)
+function reduce(output::T, output_threaded::Vector{T}) where {T<:AbstractArray}
+    for ibatch in eachindex(output_threaded)
+        @. output += output_threaded[ibatch]
+    end
+    return output
+end
+function reduce(output, output_threaded)
+    T = typeof(output)
+    throw(ArgumentError("""\n
+        No method matching reduce(::$(typeof(output)),::$(typeof(output_threaded)))
+
+        Please provide a method that appropriately reduces a `Vector{$T}`, with
+        the signature:
+
+        ```
+        custom_reduce(output::$T, output_threaded::Vector{$T})
+        ```
+
+        The reduction function **must** return the `output` variable, even
+        if it is mutable.
+
+        See: https://m3g.github.io/CellListMap.jl/stable/parallelization/#Custom-reduction-functions
+
+    """))
+end
+
+#
+# Auxiliary functions to control the display of the progress meter
+#
+_next!(::Nothing) = nothing
+_next!(p) = ProgressMeter.next!(p)
+
+#
+# Functions necessary for the projection/partition scheme
+#
+#=
+    partition!(by, x::AbstractVector)
+
+# Extended help
+
+Function that reorders the `x` vector by putting in the first positions the
+elements with values satisfying `by(el)`. Returns the number of elements
+that satisfy the condition.
+
+=#
+function partition!(by, x::AbstractVector)
+    iswap = 1
+    @inbounds for i in eachindex(x)
+        if by(x[i])
+            if iswap != i
+                x[iswap], x[i] = x[i], x[iswap]
+            end
+            iswap += 1
+        end
+    end
+    return iswap - 1
+end
+
+#=
+    project_particles!(projected_particles,cellⱼ,cellᵢ,Δc,Δc_norm,box)
+
+# Extended help
+
+Projects all particles of the cell `cellⱼ` onto the unitary vector `Δc` with direction
+connecting the centers of `cellⱼ` and `cellᵢ`. Modifies `projected_particles`, and
+returns a view of `projected_particles` containing only the particles for which
+the projection on the direction of the cell centers still allows the particle
+to be within the cutoff distance of any point of the other cell.
+
+=#
+function project_particles!(
+    projected_particles, cellⱼ, cellᵢ,
+    Δc, Δc_norm, box::Box{UnitCellType,N}
+) where {UnitCellType,N}
+    if box.lcell == 1
+        margin = box.cutoff + Δc_norm / 2 # half of the distance between centers
+    else
+        margin = box.cutoff * (1 + sqrt(N) / 2) # half of the diagonal of the cutoff-box
+    end
+    iproj = 0
+    @inbounds for j in 1:cellⱼ.n_particles
+        pⱼ = cellⱼ.particles[j]
+        xproj = dot(pⱼ.coordinates - cellᵢ.center, Δc)
+        if abs(xproj) <= margin
+            iproj += 1
+            projected_particles[iproj] = ProjectedParticle(pⱼ.index, xproj, pⱼ.coordinates)
+        end
+    end
+    pp = @view(projected_particles[1:iproj])
+    return pp
+end
+
diff --git a/src/core_computing/cross.jl b/src/core_computing/cross.jl
new file mode 100644
index 00000000..bbd5e45a
--- /dev/null
+++ b/src/core_computing/cross.jl
@@ -0,0 +1,344 @@
+"""
+    map_pairwise!(f::Function, output, box::Box, cl::CellListPair)
+
+Evaluate function f for pairs in two independent sets of particles, for which the `CellListPair`
+object was constructed.
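+
+## Example
+
+A minimal sketch of a cross-set computation (random coordinates and an
+arbitrary cutoff, for illustration only):
+
+```julia-repl
+julia> using CellListMap, StaticArrays
+
+julia> x = rand(SVector{3,Float64}, 1000); y = rand(SVector{3,Float64}, 100);
+
+julia> box = Box([1.0, 1.0, 1.0], 0.1);
+
+julia> cl = CellList(x, y, box); # CellListPair for the two sets
+
+julia> map_pairwise!((x, y, i, j, d2, out) -> out + d2, 0.0, box, cl); # sum of squared distances
+```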
+
+"""
+function map_pairwise!(f::F1, output, box::Box, cl::CellListPair{N,T};
+    # Parallelization options
+    parallel::Bool=true,
+    output_threaded=nothing,
+    reduce::F2=reduce,
+    show_progress::Bool=false
+) where {F1,F2,N,T} # F1, F2 needed for specialization for these functions
+    fswap(x,y,i,j,d2,output) = f(y,x,j,i,d2,output)
+    if !cl.swap
+        if parallel
+            output = map_pairwise_parallel!(
+                f,output,box,cl;
+                output_threaded=output_threaded,
+                reduce=reduce,
+                show_progress=show_progress
+            )
+        else
+            output = map_pairwise_serial!(f,output,box,cl,show_progress=show_progress)
+        end
+    else
+        if parallel
+            output = map_pairwise_parallel!(
+                fswap,output,box,cl;
+                output_threaded=output_threaded,
+                reduce=reduce,
+                show_progress=show_progress
+            )
+        else
+            output = map_pairwise_serial!(fswap,output,box,cl,show_progress=show_progress)
+        end
+    end
+    return output
+end
+
+#
+# Serial version for cross-interaction computations
+#
+function map_pairwise_serial!(
+    f::F, output, box::Box, cl::CellListPair;
+    show_progress::Bool=false
+) where {F<:Function}
+    @unpack n_cells_with_real_particles = cl.small_set
+    p = show_progress ? Progress(n_cells_with_real_particles, dt=1) : nothing
+    ibatch = 1
+    for i in 1:n_cells_with_real_particles
+        cellᵢ = cl.small_set.cells[cl.small_set.cell_indices_real[i]]
+        output = inner_loop!(f, box, cellᵢ, cl, output, ibatch)
+        _next!(p)
+    end
+    return output
+end
+
+#
+# Parallel version for cross computations
+#
+function batch(f::F, ibatch, cell_indices, output_threaded, box, cl::CellListPair, p) where {F}
+    for i in cell_indices
+        cellᵢ = cl.small_set.cells[cl.small_set.cell_indices_real[i]]
+        output_threaded[ibatch] = inner_loop!(f, box, cellᵢ, cl, output_threaded[ibatch], ibatch)
+        _next!(p)
+    end
+end
+
+# Note: the reason why in the next loop @spawn is followed by interpolated variables
+# is to avoid allocations caused by the capturing of variables by the closures created
+# by the macro. This may not be needed in the future, if the corresponding issue is solved.
+# See: https://discourse.julialang.org/t/type-instability-because-of-threads-boxing-variables/78395
+function map_pairwise_parallel!(
+    f::F1, output, box::Box, cl::CellListPair{N,T};
+    output_threaded=nothing,
+    reduce::F2=reduce,
+    show_progress::Bool=false
+) where {F1,F2,N,T}
+    _nbatches = nbatches(cl, :map)
+    if isnothing(output_threaded)
+        output_threaded = [deepcopy(output) for i in 1:_nbatches]
+    end
+    @unpack n_cells_with_real_particles = cl.small_set
+    p = show_progress ? Progress(n_cells_with_real_particles, dt=1) : nothing
+    @sync for (ibatch, cell_indices) in enumerate(index_chunks(1:n_cells_with_real_particles; n=_nbatches, split=RoundRobin()))
+        @spawn batch($f, $ibatch, $cell_indices, $output_threaded, $box, $cl, $p)
+    end
+    return reduce(output, output_threaded)
+end
+
+#
+# The interactions do not skip the i>=j in any type of cell.
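+# In a cross computation the two sets are disjoint, so each pair formed by one
+# particle of each set is a distinct interaction: there is no symmetry that
+# would allow skipping half of the pairs, as there is in self-computations.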
+#
+function inner_loop!(
+    f::F,
+    box,
+    cellᵢ,
+    cl::CellListPair{N,T},
+    output,
+    ibatch
+) where {F<:Function,N,T}
+    @unpack cutoff_sqr, inv_rotation, nc = box
+    jc_linear = cell_linear_index(nc, cellᵢ.cartesian_index)
+    if cl.large_set.cell_indices[jc_linear] != 0
+        cellⱼ = cl.large_set.cells[cl.large_set.cell_indices[jc_linear]]
+        output = _current_cell_interactions!(box, f, cellᵢ, cellⱼ, output)
+    end
+    for jcell in neighbor_cells(box)
+        jc_linear = cell_linear_index(nc, cellᵢ.cartesian_index + jcell)
+        if cl.large_set.cell_indices[jc_linear] != 0
+            cellⱼ = cl.large_set.cells[cl.large_set.cell_indices[jc_linear]]
+            output = _vicinal_cell_interactions!(f, box, cellᵢ, cellⱼ, cl.large_set, output, ibatch)
+        end
+    end
+    return output
+end
+
+#
+# Interactions in the current cell
+#
+# Providing two cells to this function indicates that this is a cross-interaction, thus we need
+# to loop over all pairs of particles.
+#
+function _current_cell_interactions!(box::Box, f::F, cellᵢ::Cell, cellⱼ::Cell, output) where {F<:Function}
+    @unpack cutoff_sqr, inv_rotation = box
+    for i in 1:cellᵢ.n_particles
+        @inbounds pᵢ = cellᵢ.particles[i]
+        xpᵢ = pᵢ.coordinates
+        pᵢ.real || continue
+        for j in 1:cellⱼ.n_particles
+            @inbounds pⱼ = cellⱼ.particles[j]
+            xpⱼ = pⱼ.coordinates
+            d2 = norm_sqr(xpᵢ - xpⱼ)
+            if d2 <= cutoff_sqr
+                output = f(inv_rotation * xpᵢ, inv_rotation * xpⱼ, pᵢ.index, pⱼ.index, d2, output)
+            end
+        end
+    end
+    return output
+end
+
+#
+# Cross-computations when only one cell list was computed
+#
+"""
+    map_pairwise!(f::Function, x::AbstractVector{<:AbstractVector}, sys::ParticleSystem1; kwargs...)
+    map_pairwise!(f::Function, x::AbstractMatrix, sys::ParticleSystem1; kwargs...)
+
+Evaluate function f for pairs in two independent sets of particles, where the `sys::ParticleSystem1` object
+contains the previously computed cell lists of one set of particles, and the second set is given by the
+array of positions `x`.
+
+This function can be advantageous over computing the interactions with `CellListPair`, because the
+cell lists are computed for only one set. This is useful in two situations:
+
+ 1. The set of particles in `sys` is not changing, while `x` is changing. Thus, the cell lists
+    of the fixed set can be computed only once.
+ 2. One of the sets is much smaller than the other. In this case, computing the cell lists of the largest
+    set might be too expensive. Construct the `ParticleSystem` object for the smallest set, and use this
+    function to compute the interactions with the largest set.
+
+## Keyword arguments:
+
+- `show_progress::Bool=false`: Show progress bar.
+- `update_lists::Bool=true`: Update the cell lists or not. If the positions of the `ParticleSystem1` object
+  have not changed, it is not necessary to update the cell lists.
+
+## Example
+
+```julia-repl
+julia> using CellListMap, StaticArrays
+
+julia> x = rand(SVector{3,Float64}, 1000);
+
+julia> sys = ParticleSystem(positions=x, unitcell=[1.0, 1.0, 1.0], cutoff=0.1, output=0.0);
+
+julia> y = rand(SVector{3,Float64}, 100);
+
+julia> map_pairwise((x,y,i,j,d2,output) -> output + sqrt(d2), y, sys; update_lists=false) # Compute the sum of the distances between x and y
+31.121496300032163
+
+julia> z = rand(SVector{3,Float64}, 200);
+
+julia> map_pairwise((x,y,i,j,d2,output) -> output + sqrt(d2), z, sys; update_lists=false) # Compute the sum of the distances between x and z
+63.57860511891242
+```
+
+### Note that, in this case, if the computation is run serially, it is completely non-allocating:
+
+```jldoctest
+julia> using CellListMap, StaticArrays, BenchmarkTools
+
+julia> sys = ParticleSystem(positions=rand(SVector{3,Float64}, 1000), unitcell=[1.0, 1.0, 1.0], cutoff=0.1, output=0.0, parallel=false);
+
+julia> y = rand(SVector{3,Float64}, 100);
+
+julia> f(x,y,i,j,d2,output) = output + sqrt(d2);
+
+julia> @ballocated map_pairwise(\$f, \$y, \$sys; update_lists=false) samples=1 evals=1
+0
+```
+
+"""
+function map_pairwise!(
+    f::F, x::AbstractVecOrMat, sys::ParticleSystem1;
+    show_progress::Bool=false, update_lists::Bool=true,
+) where {F<:Function}
+    sys.output = _reset_all_output!(sys.output, sys._output_threaded)
+    UpdateParticleSystem!(sys, update_lists)
+    sys.output = map_pairwise!(
+        f, sys.output, sys._box, x, sys._cell_list;
+        output_threaded=sys._output_threaded,
+        reduce=(output, output_threaded) -> reduce_output!(reducer, output, output_threaded),
+        sys.parallel, show_progress,
+    )
+    return sys.output
+end
+
+#
+# The serial and parallel versions were split to avoid the allocations
+# associated with creating `output_threaded` in the serial version, where
+# it is not used and is initialized as `nothing`.
+#
+function _serial_map_pairwise_x_vs_sys!(
+    f::F1, output, box::Box, x::AbstractVector{<:AbstractVector}, cl::CellList{N,T};
+    show_progress::Bool=false,
+) where {F1<:Function, N, T}
+    p = show_progress ? Progress(length(x), dt=1) : nothing
+    for i in eachindex(x)
+        xs = SVector{N,T}(x[i])
+        output = single_particle_vs_list!(f, output, box, i, xs, cl)
+        _next!(p)
+    end
+    return output
+end
+
+function _batch_x_vs_sys!(f::F, x, x_atom_indices, ibatch, output_threaded, box, cl, p) where {F<:Function}
+    for i in x_atom_indices
+        output_threaded[ibatch] = single_particle_vs_list!(f, output_threaded[ibatch], box, i, x[i], cl)
+        _next!(p)
+    end
+end
+
+function _parallel_map_pairwise_x_vs_sys!(
+    f::F1, output, box::Box, x::AbstractVector{<:AbstractVector}, cl::CellList{N,T};
+    show_progress::Bool=false, output_threaded=nothing, reduce::F2=reduce,
+) where {F1<:Function, F2<:Function, N, T}
+    _nbatches = nbatches(cl, :map)
+    if isnothing(output_threaded)
+        output_threaded = [deepcopy(output) for _ in 1:_nbatches]
+    end
+    p = show_progress ?
Progress(length(x), dt=1) : nothing + @sync for (ibatch, x_atom_indices) in enumerate(index_chunks(1:length(x); n=_nbatches, split=Consecutive())) + @spawn _batch_x_vs_sys!($f, $x, $x_atom_indices, $ibatch, $output_threaded, $box, $cl, $p) + end + return reduce(output, output_threaded) +end + +function map_pairwise!( + f::F1, output, box::Box, x::AbstractVector{<:AbstractVector}, cl::CellList{N,T}; + parallel::Bool=true, show_progress::Bool=false, output_threaded=nothing, reduce::F2=reduce, +) where {F1<:Function, F2<:Function, N, T} + output = if parallel + _parallel_map_pairwise_x_vs_sys!(f, output, box, x, cl; show_progress, output_threaded, reduce) + else + _serial_map_pairwise_x_vs_sys!(f, output, box, x, cl; show_progress) + end + return output +end + +function map_pairwise!( + f::F1, output, box::Box, x::AbstractMatrix, cl::CellList{N}; + parallel::Bool=true, show_progress::Bool=false, output_threaded=nothing, reduce::F2=reduce, +) where {N, F1<:Function, F2<:Function} + size(x, 1) == N || throw(DimensionMismatch("First dimension of input matrix must be $N")) + x_re = reinterpret(reshape, SVector{N,eltype(x)}, x) + return map_pairwise!(f, output, box, x_re, cl; parallel, show_progress, output_threaded, reduce) +end + +function single_particle_vs_list!( + f::F, output, box::Box{<:PeriodicCellType}, + i::Integer, x::SVector{N,T}, + cl::CellList{N,T}; +) where {F,N,T} + @unpack nc, cutoff_sqr, inv_rotation = box + xpᵢ = box.rotation * wrap_to_first(x, box.input_unit_cell.matrix) + ic = particle_cell(xpᵢ, box) + for neighbor_cell in current_and_neighbor_cells(box) + jc_cartesian = neighbor_cell + ic + jc_linear = cell_linear_index(nc, jc_cartesian) + # If cellⱼ is empty, cycle + if cl.cell_indices[jc_linear] == 0 + continue + end + cellⱼ = cl.cells[cl.cell_indices[jc_linear]] + # loop over particles of cellⱼ + for j in 1:cellⱼ.n_particles + @inbounds pⱼ = cellⱼ.particles[j] + xpⱼ = pⱼ.coordinates + d2 = norm_sqr(xpᵢ - xpⱼ) + if d2 <= cutoff_sqr + output = f(inv_rotation * xpᵢ, inv_rotation * xpⱼ, i, pⱼ.index, d2, output) + end + end + end + return output +end + +function single_particle_vs_list!( + f::F, output, box::Box{NonPeriodicCell}, + i::Integer, x::SVector{N,T}, + cl::CellList{N,T}; +) where {F,N,T} + @unpack nc, cutoff_sqr, inv_rotation = box + xpᵢ = x + ic = particle_cell(xpᵢ, box) + for neighbor_cell in current_and_neighbor_cells(box) + jc_cartesian = neighbor_cell + ic + # if cell is outside computing box, cycle + if !all(ntuple(i-> 1 .<= jc_cartesian[i] .<= nc[i], N)) + continue + end + # If cellⱼ is empty, cycle + jc_linear = cell_linear_index(nc, jc_cartesian) + if cl.cell_indices[jc_linear] == 0 + continue + end + cellⱼ = cl.cells[cl.cell_indices[jc_linear]] + # loop over particles of cellⱼ + for j in 1:cellⱼ.n_particles + @inbounds pⱼ = cellⱼ.particles[j] + if pⱼ.real + xpⱼ = pⱼ.coordinates + d2 = norm_sqr(xpᵢ - xpⱼ) + if d2 <= cutoff_sqr + output = f(xpᵢ, inv_rotation * xpⱼ, i, pⱼ.index, d2, output) + end + end + end + end + return output +end \ No newline at end of file diff --git a/src/core_computing/self.jl b/src/core_computing/self.jl new file mode 100644 index 00000000..deb8fd54 --- /dev/null +++ b/src/core_computing/self.jl @@ -0,0 +1,210 @@ +""" + map_pairwise!( + f::Function, + output, + box::Box, + cl::CellList + ;parallel::Bool=true, + show_progress::Bool=false + ) + +This function will run over every pair of particles which are closer than +`box.cutoff` and compute the Euclidean distance between the particles, +considering the periodic boundary conditions 
given in the `Box` structure.
+If the distance is smaller than the cutoff, a function `f` of the
+coordinates of the two particles will be computed.
+
+The function `f` receives six arguments as input:
+```
+f(x,y,i,j,d2,output)
+```
+These are the coordinates of one particle, the coordinates of the
+second particle, the index of the first particle, the index of the second
+particle, the squared distance between them, and the `output` variable.
+It also has to return the same `output` variable. Thus, `f` may or may not
+mutate `output`, but in either case it must return it. With that, it is
+possible to compute an average property of the distances between particles
+or, for example, to build a histogram. The squared distance `d2` is computed
+internally for comparison with the
+`cutoff`, and is passed to `f` because it is often used in the
+desired computation.
+
+## Example
+
+Computing the mean absolute difference in `x` position between random particles,
+remembering that the number of pairs of `n` particles is `n(n-1)/2`. The function does
+not use the indices or the distance, so we remove them from the parameters
+by using a closure.
+
+```julia-repl
+julia> using CellListMap, StaticArrays
+
+julia> n = 100_000;
+
+julia> sides = [250,250,250];
+
+julia> box = Box(sides,10);
+
+julia> x = [ SVector{3,Float64}(sides .* rand(3)) for i in 1:n ];
+
+julia> cl = CellList(x,box);
+
+julia> f(x,y,sum_dx) = sum_dx + abs(x[1] - y[1])
+
+julia> normalization = n / (n*(n-1)/2) # (number of particles) / (number of pairs)
+
+julia> avg_dx = normalization * map_pairwise!((x,y,i,j,d2,sum_dx) -> f(x,y,sum_dx), 0.0, box, cl)
+
+```
+
+"""
+function map_pairwise!(f::F, output, box::Box, cl::CellList;
+    # Parallelization options
+    parallel::Bool=true,
+    output_threaded=nothing,
+    reduce::Function=reduce,
+    show_progress::Bool=false,
+) where {F} # Needed for specialization for this function (avoids some allocations)
+    if parallel
+        output = map_pairwise_parallel!(
+            f,output,box,cl;
+            output_threaded=output_threaded,
+            reduce=reduce,
+            show_progress=show_progress
+        )
+    else
+        output = map_pairwise_serial!(f,output,box,cl,show_progress=show_progress)
+    end
+    return output
+end
+
+#
+# Serial version for self-pairwise computations
+#
+function map_pairwise_serial!(
+    f::F, output, box::Box, cl::CellList{N,T};
+    show_progress::Bool=false
+) where {F,N,T}
+    @unpack n_cells_with_real_particles = cl
+    p = show_progress ? Progress(n_cells_with_real_particles, dt=1) : nothing
+    ibatch = 1
+    for i in 1:n_cells_with_real_particles
+        cellᵢ = cl.cells[cl.cell_indices_real[i]]
+        output = inner_loop!(f, box, cellᵢ, cl, output, ibatch)
+        _next!(p)
+    end
+    return output
+end
+
+#
+# Parallel version for self-pairwise computations
+#
+function batch(f::F, ibatch, cell_indices, output_threaded, box, cl::CellList, p) where {F}
+    for i in cell_indices
+        cellᵢ = cl.cells[cl.cell_indices_real[i]]
+        output_threaded[ibatch] = inner_loop!(f, box, cellᵢ, cl, output_threaded[ibatch], ibatch)
+        _next!(p)
+    end
+end
+
+# Note: the reason why in the next loop @spawn is followed by interpolated variables
+# is to avoid allocations caused by the capturing of variables by the closures created
+# by the macro. This may not be needed in the future, if the corresponding issue is solved.
+# See: https://discourse.julialang.org/t/type-instability-because-of-threads-boxing-variables/78395
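+#
+# For illustration only (not package code), the pattern being avoided is
+#
+#     @spawn batch(f, ibatch, ...)   # the closure captures the variables, which may box and allocate
+#
+# while the interpolated form used below,
+#
+#     @spawn batch($f, $ibatch, ...) # the current values are passed to the task, nothing is captured
+#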
+function map_pairwise_parallel!(
+    f::F1, output, box::Box, cl::CellList{N,T};
+    output_threaded=nothing,
+    reduce::F2=reduce,
+    show_progress::Bool=false
+) where {F1,F2,N,T}
+    _nbatches = nbatches(cl, :map)
+    if isnothing(output_threaded)
+        output_threaded = [deepcopy(output) for i in 1:_nbatches]
+    end
+    @unpack n_cells_with_real_particles = cl
+    p = show_progress ? Progress(n_cells_with_real_particles, dt=1) : nothing
+    @sync for (ibatch, cell_indices) in enumerate(index_chunks(1:n_cells_with_real_particles; n=_nbatches, split=RoundRobin()))
+        @spawn batch($f, $ibatch, $cell_indices, $output_threaded, $box, $cl, $p)
+    end
+    return reduce(output, output_threaded)
+end
+
+#
+# Inner loop for self-interaction computations (cl::CellList)
+#
+
+#
+# The inner loop for Orthorhombic cells is faster because we can guarantee that
+# there are no repeated computations even when running over only half of the cells.
+#
+inner_loop!(f::F, box::Box{<:OrthorhombicCellType}, cellᵢ, cl::CellList, output, ibatch) where {F<:Function} =
+    inner_loop!(f, neighbor_cells_forward, box, cellᵢ, cl, output, ibatch)
+inner_loop!(f::F, box::Box{<:TriclinicCell}, cellᵢ, cl::CellList, output, ibatch) where {F<:Function} =
+    inner_loop!(f, neighbor_cells, box, cellᵢ, cl, output, ibatch)
+
+#
+# The current-cell function is called with a single cell as input, and the
+# vicinal-cell interactions skip the i>=j pairs for triclinic cells.
+#
+function inner_loop!(
+    f::F,
+    _neighbor_cells::NC, # depends on cell type
+    box,
+    cellᵢ,
+    cl::CellList{N,T},
+    output,
+    ibatch
+) where {F<:Function,NC<:Function,N,T}
+    @unpack cutoff_sqr, inv_rotation, nc = box
+    output = _current_cell_interactions!(box, f, cellᵢ, output)
+    for jcell in _neighbor_cells(box)
+        jc_linear = cell_linear_index(nc, cellᵢ.cartesian_index + jcell)
+        if cl.cell_indices[jc_linear] != 0
+            cellⱼ = cl.cells[cl.cell_indices[jc_linear]]
+            output = _vicinal_cell_interactions!(f, box, cellᵢ, cellⱼ, cl, output, ibatch; skip=true)
+        end
+    end
+    return output
+end
+
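+# For intuition (illustration, not package code): with `neighbor_cells_forward`
+# each pair of neighboring cells (A, B) is visited only once, so every particle
+# pair appears exactly once; with `neighbor_cells`, both (A, B) and (B, A) are
+# visited, and the i >= j index check discards the duplicate of each pair.
+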
+#
+# Interactions in the current cell
+#
+
+# Call with a single cell: this implies that this is a self-computation, and thus we loop over the
+# upper triangle only in the case of the Orthorhombic cell
+function _current_cell_interactions!(box::Box{<:OrthorhombicCellType}, f::F, cell, output) where {F<:Function}
+    @unpack cutoff_sqr, inv_rotation = box
+    # loop over the list of non-repeated particles of cell ic. All particles are real
+    for i in 1:cell.n_particles-1
+        @inbounds pᵢ = cell.particles[i]
+        xpᵢ = pᵢ.coordinates
+        for j in i+1:cell.n_particles
+            @inbounds pⱼ = cell.particles[j]
+            xpⱼ = pⱼ.coordinates
+            d2 = norm_sqr(xpᵢ - xpⱼ)
+            if d2 <= cutoff_sqr
+                output = f(inv_rotation * xpᵢ, inv_rotation * xpⱼ, pᵢ.index, pⱼ.index, d2, output)
+            end
+        end
+    end
+    return output
+end
+
+# And loop over all pairs, skipping when i >= j, when the box is triclinic
+function _current_cell_interactions!(box::Box{TriclinicCell}, f::F, cell, output) where {F<:Function}
+    @unpack cutoff_sqr, inv_rotation = box
+    # loop over all pairs, skip when i >= j, and skip if the first particle is not real
+    for i in 1:cell.n_particles
+        @inbounds pᵢ = cell.particles[i]
+        xpᵢ = pᵢ.coordinates
+        pᵢ.real || continue
+        for j in 1:cell.n_particles
+            @inbounds pⱼ = cell.particles[j]
+            (pᵢ.index >= pⱼ.index) && continue
+            xpⱼ = pⱼ.coordinates
+            d2 = norm_sqr(xpᵢ - xpⱼ)
+            if d2 <= cutoff_sqr
+                output = f(inv_rotation * xpᵢ, inv_rotation * xpⱼ, pᵢ.index, pⱼ.index, d2, output)
+            end
+        end
+    end
+    return output
+end
diff --git a/src/core_computing/vicinal_cells.jl b/src/core_computing/vicinal_cells.jl
new file mode 100644
index 00000000..2f8ac207
--- /dev/null
+++ b/src/core_computing/vicinal_cells.jl
@@ -0,0 +1,71 @@
+#
+# Compute interactions between vicinal cells
+#
+function _vicinal_cell_interactions!(f::F, box::Box, cellᵢ, cellⱼ, cl::CellList{N,T}, output, ibatch; skip=nothing) where {F<:Function,N,T}
+    # project particles onto the vector connecting the cell centers
+    Δc = cellⱼ.center - cellᵢ.center
+    Δc_norm = norm(Δc)
+    Δc = Δc / Δc_norm
+    pp = project_particles!(cl.projected_particles[ibatch], cellⱼ, cellᵢ, Δc, Δc_norm, box)
+    if length(pp) > 0
+        output = _vinicial_cells!(f, box, cellᵢ, pp, Δc, output; skip)
+    end
+    return output
+end
+
+#
+# The criterion for skipping computations is different in Orthorhombic and Triclinic boxes. The
+# type of computation (Self or Cross) is not needed here, because the symmetry
+# guarantees that repeated interactions are never computed anyway.
+#
+function _vinicial_cells!(f::F, box::Box{<:OrthorhombicCellType}, cellᵢ, pp, Δc, output; skip=nothing) where {F<:Function}
+    @unpack cutoff, cutoff_sqr, inv_rotation = box
+    # Loop over particles of cell icell
+    for i in 1:cellᵢ.n_particles
+        @inbounds pᵢ = cellᵢ.particles[i]
+        # project particle onto the vector connecting the cell centers
+        xpᵢ = pᵢ.coordinates
+        xproj = dot(xpᵢ - cellᵢ.center, Δc)
+        # Partition the pp array according to the current projections
+        n = partition!(el -> abs(el.xproj - xproj) <= cutoff, pp)
+        # Compute the interactions
+        for j in 1:n
+            @inbounds pⱼ = pp[j]
+            xpⱼ = pⱼ.coordinates
+            d2 = norm_sqr(xpᵢ - xpⱼ)
+            if d2 <= cutoff_sqr
+                output = f(inv_rotation * xpᵢ, inv_rotation * xpⱼ, pᵢ.index, pⱼ.index, d2, output)
+            end
+        end
+    end
+    return output
+end
+
+# Here, `skip` determines whether the interactions are self or cross: for
+# self-computations, we need to skip the interactions when i >= j.
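+# For reference (illustration, not package code): `partition!`, used above and
+# below, moves the elements that satisfy the predicate to the front of the
+# array and returns their count, e.g.
+#
+#     v = [1, 5, 2, 7, 3]
+#     n = partition!(<(4), v) # n == 3, and v now starts with 1, 2, 3
+#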
+function _vinicial_cells!(f::F, box::Box{<:TriclinicCell}, cellᵢ, pp, Δc, output; skip=nothing) where {F<:Function} + @unpack cutoff, cutoff_sqr, inv_rotation = box + # Loop over particles of cell icell + for i in 1:cellᵢ.n_particles + @inbounds pᵢ = cellᵢ.particles[i] + # project particle in vector connecting cell centers + xpᵢ = pᵢ.coordinates + xproj = dot(xpᵢ - cellᵢ.center, Δc) + # Partition pp array according to the current projections + n = partition!(el -> abs(el.xproj - xproj) <= cutoff, pp) + # Compute the interactions + pᵢ.real || continue + for j in 1:n + @inbounds pⱼ = pp[j] + if skip === true + pᵢ.index >= pⱼ.index && continue + end + xpⱼ = pⱼ.coordinates + d2 = norm_sqr(xpᵢ - xpⱼ) + if d2 <= cutoff_sqr + output = f(inv_rotation * xpᵢ, inv_rotation * xpⱼ, pᵢ.index, pⱼ.index, d2, output) + end + end + end + return output +end diff --git a/src/neighborlists.jl b/src/neighborlists.jl index 1a514223..1661080e 100644 --- a/src/neighborlists.jl +++ b/src/neighborlists.jl @@ -185,7 +185,6 @@ function InPlaceNeighborList(; unitcell::Union{AbstractVecOrMat,Nothing}=nothing, parallel::Bool=true, show_progress::Bool=false, - autoswap=true, nbatches=(0, 0) ) T = cutoff isa Integer ? Float64 : eltype(cutoff) @@ -201,11 +200,11 @@ function InPlaceNeighborList(; unitcell = limits(x, y) end box = Box(unitcell, cutoff) - cl = CellList(x, y, box, autoswap=autoswap, parallel=parallel, nbatches=nbatches) + cl = CellList(x, y, box, parallel=parallel, nbatches=nbatches) aux = AuxThreaded(cl) end nb = NeighborList{T}(0, Vector{Tuple{Int,Int,T}}[]) - nb_threaded = [copy(nb) for _ in 1:CellListMap.nbatches(cl)] + nb_threaded = [copy(nb) for _ in 1:CellListMap.nbatches(cl, :map)] return InPlaceNeighborList(box, cl, aux, nb, nb_threaded, parallel, show_progress) end @@ -503,7 +502,7 @@ non-periodic (do not provide a `unitcell`): ```jldoctest ;filter = r"(\\d*)\\.(\\d{4})\\d+" => s"" julia> using CellListMap, PDBTools -julia> x = coor(readPDB(CellListMap.argon_pdb_file)); +julia> x = coor(read_pdb(CellListMap.argon_pdb_file)); julia> neighborlist(x, 8.0; parallel=false) 857-element Vector{Tuple{Int64, Int64, Float64}}: @@ -534,7 +533,7 @@ And now, considering the system periodic: ```jldoctest ;filter = r"(\\d*)\\.(\\d{4})\\d+" => s"" julia> using CellListMap, PDBTools -julia> x = coor(readPDB(CellListMap.argon_pdb_file)); +julia> x = coor(read_pdb(CellListMap.argon_pdb_file)); julia> neighborlist(x, 8.0; unitcell = [21.0, 21.0, 21.0], parallel=false) 1143-element Vector{Tuple{Int64, Int64, Float64}}: @@ -589,13 +588,10 @@ end unitcell=nothing, parallel=true, show_progress=false, - autoswap=true, nbatches=(0,0) ) -Computes the list of pairs of particles of `x` which are closer than `r` to -the particles of `y`. The `autoswap` option will swap `x` and `y` to try to optimize -the cost of the construction of the cell list. +Computes the list of pairs of particles of `x` which are closer than `r` to the particles of `y`. 
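+Both vectors of (static) vectors and coordinate matrices (`N × n`, one column
+per particle) are accepted as input. For example (a minimal sketch with
+arbitrary random coordinates):
+
+```julia
+using CellListMap
+x = rand(3, 1000) # 1000 particles in 3D, as a matrix
+y = rand(3, 50)
+nb = neighborlist(x, y, 0.05) # Vector of (i, j, distance) tuples
+```
+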
## Examples @@ -605,32 +601,32 @@ non-periodic (do not provide a `unitcell`): ```jldoctest ;filter = r"(\\d*)\\.(\\d{4})\\d+" => s"" julia> using CellListMap, PDBTools -julia> x = coor(readPDB(CellListMap.argon_pdb_file, "index <= 50")); +julia> x = coor(read_pdb(CellListMap.argon_pdb_file, "index <= 50")); -julia> y = coor(readPDB(CellListMap.argon_pdb_file, "index > 50")); +julia> y = coor(read_pdb(CellListMap.argon_pdb_file, "index > 50")); julia> CellListMap.neighborlist(x, y, 8.0; parallel=false) 439-element Vector{Tuple{Int64, Int64, Float64}}: - (1, 13, 7.0177629626541265) - (1, 24, 7.976895636774999) - (1, 29, 3.1770283284856) - (1, 11, 4.0886518560523095) - (1, 17, 5.939772807102978) - (1, 30, 2.457228927063981) - (1, 44, 5.394713986857875) - (1, 45, 5.424876588458028) - (2, 2, 3.9973374888793174) - (2, 6, 5.355242104704511) + (1, 11, 4.088651646755291) + (1, 17, 5.939772435456664) + (1, 30, 2.4572288423012236) + (1, 44, 5.394714484195586) + (13, 11, 5.9391977223435495) + (13, 14, 4.560755938642345) + (13, 17, 5.323270872969311) + (13, 31, 4.201549872989818) + (15, 11, 6.710523644838785) + (15, 14, 6.58933933286106) ⋮ - (50, 27, 6.257296620746054) - (50, 32, 3.109966559305742) - (50, 33, 2.9192916949150525) - (50, 35, 5.043227240567294) - (50, 10, 3.9736202636890208) - (50, 20, 6.995405134800989) - (50, 39, 3.9001540995196584) - (50, 37, 7.5464903100713) - (50, 3, 7.232267901564487) + (46, 29, 7.402029970260964) + (46, 50, 4.926250116154994) + (46, 5, 6.738722573577668) + (46, 12, 6.363161177968381) + (46, 22, 5.082701606032681) + (46, 41, 4.531261514830008) + (46, 45, 4.251787182648767) + (46, 48, 4.9269093987894745) + (46, 1, 7.99947286297016) ``` Now, considering the system periodic: @@ -638,32 +634,32 @@ Now, considering the system periodic: ```jldoctest ;filter = r"(\\d*)\\.(\\d{4})\\d+" => s"" julia> using CellListMap, PDBTools -julia> x = coor(readPDB(CellListMap.argon_pdb_file, "index <= 50")); +julia> x = coor(read_pdb(CellListMap.argon_pdb_file, "index <= 50")); -julia> y = coor(readPDB(CellListMap.argon_pdb_file, "index > 50")); +julia> y = coor(read_pdb(CellListMap.argon_pdb_file, "index > 50")); julia> CellListMap.neighborlist(x, y, 8.0; unitcell = [21.0, 21.0, 21.0], parallel=false) 584-element Vector{Tuple{Int64, Int64, Float64}}: - (1, 12, 7.360804915224965) - (1, 13, 7.017762962654125) - (1, 24, 7.976895636774997) - (1, 29, 3.177028328485598) - (1, 44, 5.394713986857875) - (1, 45, 5.4248765884580274) - (1, 11, 4.088651856052312) - (1, 17, 5.93977280710298) - (1, 30, 2.457228927063981) - (1, 28, 6.853834401267658) + (1, 13, 7.0177634180502215) + (1, 24, 7.97689645513632) + (1, 29, 3.177029085967527) + (1, 44, 5.3947144841955845) + (1, 45, 5.424876392996784) + (7, 13, 5.245861876160856) + (7, 24, 7.56131349268393) + (7, 29, 2.2629708276336706) + (7, 44, 7.7484227088218764) + (7, 45, 3.3104152017215167) ⋮ - (50, 3, 7.232267901564487) - (50, 10, 3.9736202636890203) - (50, 27, 6.257296620746054) - (50, 32, 3.1099665593057426) - (50, 33, 2.919291694915052) - (50, 35, 5.043227240567294) - (50, 20, 6.995405134800987) - (50, 37, 7.546490310071297) - (50, 39, 3.900154099519657) + (45, 36, 7.530346972366768) + (18, 5, 6.8561687188298315) + (18, 12, 4.461142949385965) + (18, 41, 5.01835758100573) + (45, 16, 7.493858435501202) + (45, 25, 6.0791278577215815) + (45, 24, 6.97677144471301) + (18, 10, 6.9654396670725) + (18, 37, 6.222988130894417) ``` !!! 
note @@ -676,7 +672,6 @@ function neighborlist( unitcell=nothing, parallel=true, show_progress=false, - autoswap=true, nbatches=(0, 0) ) system = InPlaceNeighborList( @@ -686,7 +681,6 @@ function neighborlist( unitcell=unitcell, parallel=parallel, show_progress=show_progress, - autoswap=autoswap, nbatches=nbatches ) return neighborlist!(system) @@ -828,23 +822,25 @@ end @test compare_nb_lists(cl, nb, x, r)[1] nb = nl_NN(BallTree, inrange, x, y, r) - cl = CellListMap.neighborlist(x, y, r, autoswap=false) + cl = CellListMap.neighborlist(x, y, r) @test is_unique(cl; self=false) @test compare_nb_lists(cl, nb, x, y, r)[1] - cl = CellListMap.neighborlist(x, y, r, autoswap=true) + nb = nl_NN(BallTree, inrange, y, x, r) + cl = CellListMap.neighborlist(y, x, r) @test is_unique(cl; self=false) - @test compare_nb_lists(cl, nb, x, y, r)[1] + @test compare_nb_lists(cl, nb, y, x, r)[1] # with x smaller than y x = [rand(SVector{N,Float64}) for _ in 1:500] y = [rand(SVector{N,Float64}) for _ in 1:1000] nb = nl_NN(BallTree, inrange, x, y, r) - cl = CellListMap.neighborlist(x, y, r, autoswap=false) + cl = CellListMap.neighborlist(x, y, r) @test is_unique(cl; self=false) @test compare_nb_lists(cl, nb, x, y, r)[1] - cl = CellListMap.neighborlist(x, y, r, autoswap=true) + nb = nl_NN(BallTree, inrange, y, x, r) + cl = CellListMap.neighborlist(y, x, r) @test is_unique(cl; self=false) - @test compare_nb_lists(cl, nb, x, y, r)[1] + @test compare_nb_lists(cl, nb, y, x, r)[1] # Using matrices as input x = rand(N, 1000) @@ -856,23 +852,25 @@ end @test compare_nb_lists(cl, nb, x, r)[1] nb = nl_NN(BallTree, inrange, x, y, r) - cl = CellListMap.neighborlist(x, y, r, autoswap=false) + cl = CellListMap.neighborlist(x, y, r) @test is_unique(cl; self=false) @test compare_nb_lists(cl, nb, x, y, r)[1] - cl = CellListMap.neighborlist(x, y, r, autoswap=true) + nb = nl_NN(BallTree, inrange, y, x, r) + cl = CellListMap.neighborlist(y, x, r) @test is_unique(cl; self=false) - @test compare_nb_lists(cl, nb, x, y, r)[1] + @test compare_nb_lists(cl, nb, y, x, r)[1] # with x smaller than y x = rand(N, 500) y = rand(N, 1000) nb = nl_NN(BallTree, inrange, x, y, r) - cl = CellListMap.neighborlist(x, y, r, autoswap=false) + cl = CellListMap.neighborlist(x, y, r) @test is_unique(cl; self=false) @test compare_nb_lists(cl, nb, x, y, r)[1] - cl = CellListMap.neighborlist(x, y, r, autoswap=true) + nb = nl_NN(BallTree, inrange, y, x, r) + cl = CellListMap.neighborlist(y, x, r) @test is_unique(cl; self=false) - @test compare_nb_lists(cl, nb, x, y, r)[1] + @test compare_nb_lists(cl, nb, y, x, r)[1] # Check random coordinates to test the limits more thoroughly check_random_NN = true @@ -880,7 +878,7 @@ end x = rand(SVector{N,Float64}, 100) y = rand(SVector{N,Float64}, 50) nb = nl_NN(BallTree, inrange, x, y, r) - cl = CellListMap.neighborlist(x, y, r, autoswap=false) + cl = CellListMap.neighborlist(x, y, r) @test is_unique(cl; self=false) check_random_NN = compare_nb_lists(cl, nb, x, y, r)[1] end @@ -890,12 +888,13 @@ end x = rand(Float32, N, 500) y = rand(Float32, N, 1000) nb = nl_NN(BallTree, inrange, x, y, r) - cl = CellListMap.neighborlist(x, y, r, autoswap=false) + cl = CellListMap.neighborlist(x, y, r) @test is_unique(cl; self=false) @test compare_nb_lists(cl, nb, x, y, r)[1] - cl = CellListMap.neighborlist(x, y, r, autoswap=true) + nb = nl_NN(BallTree, inrange, y, x, r) + cl = CellListMap.neighborlist(y, x, r) @test is_unique(cl; self=false) - @test compare_nb_lists(cl, nb, x, y, r)[1] + @test compare_nb_lists(cl, nb, y, x, r)[1] end diff 
--git a/src/precompile.jl b/src/precompile.jl index 467e907e..f9cf7aeb 100644 --- a/src/precompile.jl +++ b/src/precompile.jl @@ -1,28 +1,35 @@ import PrecompileTools -if Base.VERSION >= v"1.9" - PrecompileTools.@setup_workload begin - # Putting some things in `@setup_workload` instead of `@compile_workload` can reduce the size of the - # precompile file and potentially make loading faster. - cutoff = 0.05 - x3d = rand(3, 100) - y3d = rand(3, 100) - u3d = [ 1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0 ] - x2d = rand(2, 100) - y2d = rand(2, 100) - u2d = [ 1.0 0.0; 0.0 1.0 ] - x, box = CellListMap.xatomic(5000) - f(x,y,i,j,d2,out) = out += d2 - PrecompileTools.@compile_workload begin - cl = CellList(x,box) - map_pairwise!(f, 0.0, box, cl) - neighborlist(x3d, y3d, cutoff, unitcell=u3d) - neighborlist(x3d, cutoff, unitcell=u3d) - neighborlist(x3d, y3d, cutoff) - neighborlist(x3d, cutoff) - neighborlist(x2d, y2d, cutoff, unitcell=u2d) - neighborlist(x2d, cutoff) - neighborlist(x2d, y2d, cutoff) - neighborlist(x2d, cutoff, unitcell=u2d) - end +PrecompileTools.@setup_workload begin + # Putting some things in `@setup_workload` instead of `@compile_workload` can reduce the size of the + # precompile file and potentially make loading faster. + import LinearAlgebra + cutoff = 0.05 + x3d = rand(3, 3) + x3d[:,3] .= x3d[:,2] .+ 1e-5 + y3d = copy(x3d) + u3d = [ 1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0 ] + x2d = rand(2, 3) + x2d[:,3] .= x2d[:,2] .+ 1e-5 + y2d = copy(x2d) + u2d = [ 1.0 0.0; 0.0 1.0 ] + x, box = CellListMap.xatomic(5000) + f(_,_,_,_,d2,out) = out += d2 + PrecompileTools.@compile_workload begin + cl = CellList(x,box) + map_pairwise!(f, 0.0, box, cl) + neighborlist(x3d, y3d, cutoff, unitcell=u3d) + neighborlist(x3d, y3d, cutoff, unitcell=LinearAlgebra.diag(u3d)) + neighborlist(x3d, cutoff, unitcell=u3d) + neighborlist(x3d, y3d, cutoff) + neighborlist(x3d, cutoff) + neighborlist(x2d, y2d, cutoff, unitcell=u2d) + neighborlist(x2d, y2d, cutoff, unitcell=LinearAlgebra.diag(u2d)) + neighborlist(x2d, cutoff) + neighborlist(x2d, y2d, cutoff) + neighborlist(x2d, cutoff, unitcell=u2d) + sys = ParticleSystem(positions=x3d, unitcell=u3d, cutoff=cutoff, output=0.0) + map_pairwise(f, y3d, sys) + sys = ParticleSystem(positions=x2d, unitcell=u2d, cutoff=cutoff, output=0.0) + map_pairwise(f, y2d, sys) end end \ No newline at end of file diff --git a/src/testing.jl b/src/testing.jl index 15ca59a2..579b2d99 100644 --- a/src/testing.jl +++ b/src/testing.jl @@ -26,6 +26,7 @@ function pathological_coordinates(N) x[12] = sides + @SVector [prevfloat(0.0), prevfloat(0.0), sides[3] * rand()] x[13] = @SVector [nextfloat(0.0), nextfloat(0.0), sides[3] * rand()] x[14] = @SVector [prevfloat(0.0), prevfloat(0.0), sides[3] * rand()] + x[15] = zero(SVector{3,Float64}) x[100] = @SVector [sides[1] / 2, -sides[2] / 2, 2 * sides[3]] y = [sides .* rand(SVector{3,Float64}) for i in 1:N] diff --git a/test/BasicForParticleSystem.jl b/test/BasicForParticleSystem.jl index c2f57941..989c7e01 100644 --- a/test/BasicForParticleSystem.jl +++ b/test/BasicForParticleSystem.jl @@ -202,6 +202,7 @@ end r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += d2, 0.0, box, cl) system = ParticleSystem(xpositions=x, cutoff=0.1, output=0.0, unitcell=unitcell) @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system) + # Compute a different property, without updating lists r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += sqrt(d2), 0.0, box, cl) @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += sqrt(d2), system; update_lists=false) @@ 
-219,7 +220,7 @@ end box = Box(uc, 0.1) cl = CellList(x, y, box) r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += d2, 0.0, box, cl) - system = ParticleSystem(xpositions=x, ypositions=y, cutoff=0.1, output=0.0, unitcell=unitcell, autoswap=false) + system = ParticleSystem(xpositions=x, ypositions=y, cutoff=0.1, output=0.0, unitcell=unitcell) @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system) # change x coordinates @@ -227,7 +228,7 @@ end cl = CellList(x, y, box) r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += d2, 0.0, box, cl) system.xpositions .= x - @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system; update_lists=false) + @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system; update_lists=true) # increase x size x = rand(SVector{3,Float64}, 200) @@ -235,7 +236,7 @@ end r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += d2, 0.0, box, cl) resize!(system.xpositions, length(x)) system.xpositions .= x - @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system; update_lists=false) + @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system; update_lists=true) # # x is greater @@ -247,7 +248,7 @@ end box = Box(uc, 0.1) cl = CellList(x, y, box) r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += d2, 0.0, box, cl) - system = ParticleSystem(xpositions=x, ypositions=y, cutoff=0.1, output=0.0, unitcell=unitcell, autoswap=false) + system = ParticleSystem(xpositions=x, ypositions=y, cutoff=0.1, output=0.0, unitcell=unitcell) @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system) # change x coordinates @@ -255,7 +256,7 @@ end cl = CellList(x, y, box) r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += d2, 0.0, box, cl) system.xpositions .= x - @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system; update_lists=false) + @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system; update_lists=true) # increase x size x = rand(SVector{3,Float64}, 1100) @@ -263,11 +264,94 @@ end r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += d2, 0.0, box, cl) resize!(system.xpositions, length(x)) system.xpositions .= x - @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system; update_lists=false) + @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system; update_lists=true) + + # change y coordinates + y = rand(SVector{3,Float64}, 100) + cl = CellList(x, y, box) + r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += d2, 0.0, box, cl) + system.ypositions .= y + @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system; update_lists=true) + + # increase y size + y = rand(SVector{3,Float64}, 110) + cl = CellList(x, y, box) + r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += d2, 0.0, box, cl) + resize!(system.ypositions, length(y)) + system.ypositions .= y + @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system; update_lists=true) + + # increase y size beyond x size + y = rand(SVector{3,Float64}, 1300) + cl = CellList(x, y, box) + r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += d2, 0.0, box, cl) + resize!(system.ypositions, length(y)) + system.ypositions .= y + @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += d2, system; update_lists=true) + + # compute a different property, without updating lists + r = CellListMap.map_pairwise!((x, y, i, j, d2, r) -> r += 2*d2, 0.0, box, cl) + @test r ≈ map_pairwise!((x, y, i, j, d2, r) -> r += 2*d2, system; update_lists=false) end end +@testitem "cross_x_vs_sys" begin + using CellListMap + using StaticArrays + + f(_, _, _, _, 
d2, out) = out += d2
+    cutoff = 0.1
+    for uc in (nothing, [1,1,1], [1 0.2 0; 0.2 1.0 0; 0 0 1 ]), parallel in (false, true)
+        x = rand(SVector{3,Float64}, 100)
+        # y smaller than x
+        y = rand(SVector{3,Float64}, 10)
+        box = Box(isnothing(uc) ? limits(x,y) : uc, cutoff)
+        naive = CellListMap.map_naive!(f, 0.0, x, y, box)
+        sys = ParticleSystem(xpositions=x, cutoff=cutoff, output=0.0, unitcell=uc, parallel=parallel)
+        @test naive ≈ map_pairwise!(f, y, sys; update_lists=false)
+        @test naive ≈ map_pairwise!(f, y, sys; update_lists=true)
+
+        # Update x positions
+        sys.xpositions .= rand(SVector{3,Float64}, 100)
+        box = Box(isnothing(uc) ? limits(x,y) : uc, cutoff)
+        naive = CellListMap.map_naive!(f, 0.0, x, y, box)
+        @test naive ≈ map_pairwise!(f, y, sys; update_lists=true)
+
+        # Matrices as inputs
+        xmat = stack(x)
+        ymat = stack(y)
+        sys = ParticleSystem(xpositions=xmat, cutoff=cutoff, output=0.0, unitcell=uc, parallel=parallel)
+        @test naive ≈ map_pairwise!(f, ymat, sys; update_lists=false)
+        @test naive ≈ map_pairwise!(f, ymat, sys; update_lists=true)
+
+        # y greater than x, and possibly spanning a region outside the box of x
+        y = 2 .* rand(SVector{3,Float64}, 100)
+        box = Box(isnothing(uc) ? limits(x,y) : uc, cutoff)
+        naive = CellListMap.map_naive!(f, 0.0, x, y, box)
+        sys = ParticleSystem(xpositions=x, cutoff=cutoff, output=0.0, unitcell=uc, parallel=parallel)
+        @test naive ≈ map_pairwise!(f, y, sys; update_lists=false)
+        @test naive ≈ map_pairwise!(f, y, sys; update_lists=true)
+
+        # here y is completely outside the range of x, thus without PBCs, this is zero
+        y = rand(SVector{3,Float64}, 10) .+ Ref(SVector(10.0, 10.0, 10.0))
+        box = Box(isnothing(uc) ? limits(x,y) : uc, cutoff)
+        naive = CellListMap.map_naive!(f, 0.0, x, y, box)
+        sys = ParticleSystem(xpositions=x, cutoff=cutoff, output=0.0, unitcell=uc, parallel=parallel)
+        @test naive ≈ map_pairwise!(f, y, sys; update_lists=false)
+        @test naive ≈ map_pairwise!(f, y, sys; update_lists=true)
+    end
+
+    # Test the case in which output_threaded is not provided
+    x = rand(SVector{3,Float64}, 100)
+    y = rand(SVector{3,Float64}, 10)
+    box = Box([1,1,1], cutoff)
+    cl = CellList(x, box)
+    naive = CellListMap.map_naive!(f, 0.0, x, y, box)
+    @test naive ≈ map_pairwise!(f, 0.0, box, y, cl; output_threaded=nothing, parallel=true)
+
+end
+
 @testitem "ParticleSystem parallelization" begin
@@ -472,6 +556,7 @@ end
 
     # Function to be evaluated for each pair: gravitational potential
     function potential(i, j, d2, u, mass)
+        d2 == 0.0 && return u
         d = sqrt(d2)
         u = u - 9.8 * mass[i] * mass[j] / d
         return u
@@ -487,6 +572,7 @@ end
 
     # Function to be evaluated for each pair: gravitational force
     function calc_forces!(x, y, i, j, d2, mass, forces)
+        d2 == 0.0 && return forces
         G = 9.8 * mass[i] * mass[j] / d2
         d = sqrt(d2)
         df = (G / d) * (x - y)
diff --git a/test/runtests.jl b/test/runtests.jl
index 16de7375..f0c8ef83 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,5 +1,7 @@
 using TestItemRunner: @run_package_tests, @testitem
 
+@run_package_tests
+
 @testitem "Aqua.test_all" begin
     import Aqua
     Aqua.test_all(CellListMap)
@@ -287,9 +289,24 @@ end
 
     @test map_pairwise!((x,y,i,j,d2,avg_dx) -> f(x,y,avg_dx),0.,new_box,new_cl,parallel=false) ≈ new_naive
     @test map_pairwise!((x,y,i,j,d2,avg_dx) -> f(x,y,avg_dx),0.,new_box,new_cl,parallel=true) ≈ new_naive
-end
 
+    # Same as above, but with parallel=false on the update
+    new_cl = CellListMap.UpdateCellList!(new_x,new_box,new_cl,new_aux; parallel=false)
+    new_x, new_box = CellListMap.xatomic(10^4)
+    new_box = Box([ 200 0 10
+                     15 200 0
+                      0 0 200 ],cutoff)
+    new_naive = CellListMap.map_naive!((x,y,i,j,d2,avg_dx) -> f(x,y,avg_dx),0.,new_x,new_box)
+    new_cl = CellListMap.UpdateCellList!(new_x,new_box,new_cl,new_aux; parallel=false)
+    @test map_pairwise!((x,y,i,j,d2,avg_dx) -> f(x,y,avg_dx),0.,new_box,new_cl,parallel=false) ≈ new_naive
+    @test map_pairwise!((x,y,i,j,d2,avg_dx) -> f(x,y,avg_dx),0.,new_box,new_cl,parallel=true) ≈ new_naive
+
+    # Internal argument-error test: user code should never reach this error
+    x = rand(SVector{3,Float64},100)
+    cl1 = CellList(x,Box([1,1,1],0.1))
+    cl2 = CellList(x,Box([1.2,1.2,1.2],0.1))
+    @test_throws ArgumentError CellListMap.merge_cell_lists!(cl1,cl2)
+end
 
 @testitem "applications" begin
 
@@ -319,6 +336,7 @@ end
 
     # Function to be evaluated for each pair: gravitational potential
     function potential(i,j,d2,u,mass)
+        d2 == 0.0 && return u
        d = sqrt(d2)
        u = u - 9.8*mass[i]*mass[j]/d
        return u
@@ -332,6 +350,7 @@ end
 
    # Function to be evaluated for each pair: gravitational force
    function calc_forces!(x,y,i,j,d2,mass,forces)
+        d2 == 0.0 && return forces
        G = 9.8*mass[i]*mass[j]/d2
        d = sqrt(d2)
        df = (G/d)*(x - y)
@@ -456,5 +475,6 @@ include("$(@__DIR__)/namd/compare_with_namd.jl")
 include("$(@__DIR__)/gromacs/compare_with_gromacs.jl")
 include("$(@__DIR__)/BasicForParticleSystem.jl")
 include("$(@__DIR__)/namd/ParticleSystem_vs_NAMD.jl")
+include("$(@__DIR__)/test_show.jl")
+
-@run_package_tests
diff --git a/test/test_show.jl b/test/test_show.jl
new file mode 100644
index 00000000..25c6364a
--- /dev/null
+++ b/test/test_show.jl
@@ -0,0 +1,152 @@
+#
+# Testing show methods
+#
+@testitem "show methods" begin
+    using CellListMap, StaticArrays
+
+    function test_show(
+        x, s::String;
+        f64 = (x1,x2) -> isapprox(x1,x2,rtol=1e-3),
+        i64 = (x1,x2) -> x1 == x2,
+        rep = Dict{String,String}(),
+    )
+        match(f,x1,x2) = begin
+            if !f(x1,x2)
+                println("show method test failed with $x1 == $x2")
+                return false
+            end
+            return true
+        end
+        buff = IOBuffer()
+        show(buff, MIME"text/plain"(), x)
+        ss = String(take!(buff))
+        s = replace(s, rep...)
+        ss = replace(ss, rep...)
+        sfields = split(s)
+        ssfields = split(ss)
+        all_match = true
+        for (f1, f2) in zip(sfields, ssfields)
+            !all_match && break
+            value = tryparse(Int, f1)
+            if !isnothing(value)
+                all_match = match(i64, value, tryparse(Int, f2))
+                continue
+            end
+            value = tryparse(Float64, f1)
+            if !isnothing(value)
+                all_match = match(f64, value, tryparse(Float64,f2))
+                continue
+            end
+            all_match = match(isequal, f1, f2)
+        end
+        return all_match
+    end
+
+    x = rand(SVector{3,Float64}, 100)
+    y = rand(SVector{3,Float64}, 10)
+    box = Box([10,10,10], 1)
+
+    @test test_show(box, """
+    Box{OrthorhombicCell, 3}
+      unit cell matrix = [ 10.0 0.0 0.0; 0.0 10.0 0.0; 0.0 0.0 10.0 ]
+      cutoff = 1.0
+      number of computing cells on each dimension = [13, 13, 13]
+      computing cell sizes = [1.0, 1.0, 1.0] (lcell: 1)
+      Total number of cells = 2197
+    """;
+        rep = Dict("CellListMap." => "")
+    )
+
+    cl = CellList(x, box; nbatches=(2,4))
+    @test test_show(cl,"""
+    CellList{3, Float64}
+      100 real particles.
+      1 cells with real particles.
+      800 particles in computing box, including images.
+    """;
+        rep = Dict("CellListMap." => "")
+    )
+
+    @test test_show(nbatches(cl), "(2, 4)")
+    @test test_show(CellListMap.AuxThreaded(cl),"""
+    CellListMap.AuxThreaded{3, Float64}
+     Auxiliary arrays for nbatches = 2
+    """;
+        rep = Dict("CellListMap." => "")
+    )
+
+    cl = CellList(x,y,box; nbatches=(2,4))
+    @test test_show(cl,"""
+    CellListMap.CellListPair{3, Float64}
+       1 cells with real particles of the smallest set.
+ 1 cells with real particles of the largest set. + """; + rep = Dict("CellListMap." => "") + ) + + @test test_show(nbatches(cl), "(2, 4)") + @test test_show(CellListMap.AuxThreaded(cl),""" + CellListMap.AuxThreadedPair{3, Float64} + Auxiliary arrays for nbatches = 2 + """; + rep = Dict("CellListMap." => "") + ) + + s = ParticleSystem(xpositions=x,cutoff=0.1,unitcell=[10,10,10],output=0.0,nbatches=(2,2)) + @test test_show(s, """ + ParticleSystem1{output} of dimension 3, composed of: + Box{OrthorhombicCell, 3} + unit cell matrix = [ 10.0 0.0 0.0; 0.0 10.0 0.0; 0.0 0.0 10.0 ] + cutoff = 0.1 + number of computing cells on each dimension = [103, 103, 103] + computing cell sizes = [0.1, 0.1, 0.1] (lcell: 1) + Total number of cells = 1092727 + CellList{3, Float64} + 100 real particles. + 97 cells with real particles. + 139 particles in computing box, including images. + Parallelization auxiliary data set for 2 batch(es). + Type of output variable (output): Float64 + """; + i64 = (x1,x2) -> isapprox(x1, x2, rtol=1), + rep = Dict("CellListMap." => "") + ) + + s = ParticleSystem(xpositions=x,ypositions=y,cutoff=0.1,unitcell=[10,10,10],output=0.0,nbatches=(2,2)) + @test test_show(s, """ + ParticleSystem2{output} of dimension 3, composed of: + Box{OrthorhombicCell, 3} + unit cell matrix = [ 10.0 0.0 0.0; 0.0 10.0 0.0; 0.0 0.0 10.0 ] + cutoff = 0.1 + number of computing cells on each dimension = [103, 103, 103] + computing cell sizes = [0.1, 0.1, 0.1] (lcell: 1) + Total number of cells = 1092727 + CellListMap.CellListPair{3, Float64} + 10 cells with real particles of the smallest set. + 97 cells with real particles of the largest set. + Parallelization auxiliary data set for 2 batch(es). + Type of output variable (output): Float64 + """; + i64 = (x1,x2) -> isapprox(x1, x2, rtol=1), + rep = Dict("CellListMap." => "") + ) + + @test test_show(InPlaceNeighborList(x=x, cutoff=0.1, unitcell=[1,1,1]),""" + InPlaceNeighborList with types: + CellList{3, Float64} + Box{OrthorhombicCell, 3, Float64, Float64, 9, Float64} + Current list buffer size: 0 + """; + rep = Dict("CellListMap." => "") + ) + + @test test_show(InPlaceNeighborList(x=x, y=x, cutoff=0.1, unitcell=[1,1,1]),""" + InPlaceNeighborList with types: + CellListMap.CellListPair{3, Float64} + Box{OrthorhombicCell, 3, Float64, Float64, 9, Float64} + Current list buffer size: 0 + """; + rep = Dict("CellListMap." => "") + ) + +end \ No newline at end of file