From f153c95db953985ba4af5c87c2fb7e6ba2288e81 Mon Sep 17 00:00:00 2001 From: Daniel Karrasch Date: Wed, 29 Sep 2021 18:27:34 +0200 Subject: [PATCH] Package hygiene (#82) * package hygiene * add `Mass` and `Stiffness` types --- Project.toml | 2 +- docs/Manifest.toml | 265 ++++++++++++++++++-------------- docs/makeimages.jl | 2 +- docs/src/fem.md | 11 +- src/CoherentStructures.jl | 47 +++--- src/FEMassembly.jl | 39 +++-- src/TO.jl | 103 ++++++------- src/advection_diffusion.jl | 26 ++-- src/boundaryconditions.jl | 4 +- src/diffusion_operators.jl | 51 ++---- src/dynamicmetrics.jl | 137 ++++++++--------- src/ellipticLCS.jl | 94 ++++++----- src/exports.jl | 31 ++-- src/gridfunctions.jl | 34 ++-- src/odesolvers.jl | 64 ++++---- src/plotting.jl | 4 +- src/pointlocation.jl | 24 +-- src/pullbacktensors.jl | 50 +++--- src/util.jl | 36 ++--- src/velocityfields.jl | 16 +- test/define_vector_fields.jl | 8 +- test/test_advectiondiffusion.jl | 4 +- test/test_elliptic.jl | 40 ++--- test/test_fem.jl | 16 +- test/test_odesolvers.jl | 4 +- 25 files changed, 554 insertions(+), 558 deletions(-) diff --git a/Project.toml b/Project.toml index 8846019e..f643c8ff 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "CoherentStructures" uuid = "0c1513b4-3a13-56f1-9cd2-8312eaec88c4" -version = "0.4.4" +version = "0.4.5" [deps] ArnoldiMethod = "ec485272-7323-5ecc-a04f-4719b315124d" diff --git a/docs/Manifest.toml b/docs/Manifest.toml index 298c7c64..1e35b253 100644 --- a/docs/Manifest.toml +++ b/docs/Manifest.toml @@ -34,9 +34,9 @@ version = "3.5.0+3" [[ArrayInterface]] deps = ["IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"] -git-tree-sha1 = "baf4ef9082070477046bd98306952292bfcb0af9" +git-tree-sha1 = "d84c956c4c0548b4caf0e4e96cf5b6494b5b1529" uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" -version = "3.1.25" +version = "3.1.32" [[Artifacts]] uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" @@ -56,35 +56,41 @@ version = "0.4.4" [[Base64]] uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" +[[BitTwiddlingConvenienceFunctions]] +deps = ["Static"] +git-tree-sha1 = "652aab0fc0d6d4db4cc726425cadf700e9f473f1" +uuid = "62783981-4cbd-42fc-bca8-16325de8dc4b" +version = "0.1.0" + [[Bzip2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "c3598e525718abcc440f69cc6d5f60dda0a1b61e" +git-tree-sha1 = "19a35467a82e236ff51bc17a3a44b69ef35185a2" uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0" -version = "1.0.6+5" +version = "1.0.8+0" [[CPUSummary]] deps = ["Hwloc", "IfElse", "Static"] -git-tree-sha1 = "147bcca99e098c0da48d7d9e108210704138f0f9" +git-tree-sha1 = "ed720e2622820bf584d4ad90e6fcb93d95170b44" uuid = "2a0fbf3d-bb9c-48f3-b0a9-814d99fd7ab9" -version = "0.1.2" +version = "0.1.3" [[Cairo_jll]] deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = "e2f47f6d8337369411569fd45ae5753ca10394c6" +git-tree-sha1 = "f2202b55d816427cd385a9a4f3ffb226bee80f99" uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a" -version = "1.16.0+6" +version = "1.16.1+0" [[ChainRulesCore]] deps = ["Compat", "LinearAlgebra", "SparseArrays"] -git-tree-sha1 = "bdc0937269321858ab2a4f288486cb258b9a0af7" +git-tree-sha1 = "4ce9393e871aca86cc457d9f66976c3da6902ea7" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -version = "1.3.0" +version = "1.4.0" [[CloseOpenIntervals]] deps = ["ArrayInterface", "Static"] -git-tree-sha1 = 
"4fcacb5811c9e4eb6f9adde4afc0e9c4a7a92f5a" +git-tree-sha1 = "ce9c0d07ed6e1a4fecd2df6ace144cbd29ba6f37" uuid = "fb6a15b2-703c-40df-9091-08a04967cfa9" -version = "0.1.1" +version = "0.1.2" [[Clustering]] deps = ["Distances", "LinearAlgebra", "NearestNeighbors", "Printf", "SparseArrays", "Statistics", "StatsBase"] @@ -100,9 +106,7 @@ version = "0.7.0" [[CoherentStructures]] deps = ["ArnoldiMethod", "AxisArrays", "ColorTypes", "DiffEqBase", "Distances", "Distributed", "Ferrite", "GeometricalPredicates", "Interpolations", "IterativeSolvers", "LinearAlgebra", "LinearMaps", "NearestNeighbors", "OrdinaryDiffEq", "ProgressMeter", "RecipesBase", "SharedArrays", "SparseArrays", "StaticArrays", "Statistics", "Tensors", "VoronoiDelaunay"] -git-tree-sha1 = "458314b08f18e814da0a0bd12b6ecb1d37688821" -repo-rev = "master" -repo-url = "https://github.com/CoherentStructures/CoherentStructures.jl.git" +path = ".." uuid = "0c1513b4-3a13-56f1-9cd2-8312eaec88c4" version = "0.4.4" @@ -137,9 +141,9 @@ version = "0.3.0" [[Compat]] deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"] -git-tree-sha1 = "727e463cfebd0c7b999bbf3e9e7e16f254b94193" +git-tree-sha1 = "4866e381721b30fac8dda4c8cb1d9db45c8d2994" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "3.34.0" +version = "3.37.0" [[CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] @@ -164,9 +168,9 @@ uuid = "754358af-613d-5f8d-9788-280bf1605d4c" version = "0.2.0" [[DataAPI]] -git-tree-sha1 = "ee400abb2298bd13bfc3df1c412ed228061a2385" +git-tree-sha1 = "bec2532f8adb82005476c141ec23e921fc20971b" uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" -version = "1.7.0" +version = "1.8.0" [[DataStructures]] deps = ["Compat", "InteractiveUtils", "OrderedCollections"] @@ -189,9 +193,9 @@ uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" [[DiffEqBase]] deps = ["ArrayInterface", "ChainRulesCore", "DEDataArrays", "DataStructures", "Distributions", "DocStringExtensions", "FastBroadcast", "ForwardDiff", "FunctionWrappers", "IterativeSolvers", "LabelledArrays", "LinearAlgebra", "Logging", "MuladdMacro", "NonlinearSolve", "Parameters", "PreallocationTools", "Printf", "RecursiveArrayTools", "RecursiveFactorization", "Reexport", "Requires", "SciMLBase", "Setfield", "SparseArrays", "StaticArrays", "Statistics", "SuiteSparse", "ZygoteRules"] -git-tree-sha1 = "f52aa1dfbb028768dc952bd4d798646c3538d363" +git-tree-sha1 = "420ad175d5e420e2c55a0ed8a9c18556e6735f80" uuid = "2b5f629d-d688-5b77-993f-72d75c75574e" -version = "6.73.0" +version = "6.73.2" [[DiffResults]] deps = ["StaticArrays"] @@ -207,19 +211,19 @@ version = "1.3.0" [[Distances]] deps = ["LinearAlgebra", "Statistics", "StatsAPI"] -git-tree-sha1 = "abe4ad222b26af3337262b8afb28fab8d215e9f8" +git-tree-sha1 = "9f46deb4d4ee4494ffb5a40a27a2aced67bdd838" uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" -version = "0.10.3" +version = "0.10.4" [[Distributed]] deps = ["Random", "Serialization", "Sockets"] uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" [[Distributions]] -deps = ["FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "StatsFuns"] -git-tree-sha1 = "3889f646423ce91dd1055a76317e9a1d3a23fff1" +deps = ["ChainRulesCore", "FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SparseArrays", "SpecialFunctions", 
"Statistics", "StatsBase", "StatsFuns"] +git-tree-sha1 = "f4efaa4b5157e0cdb8283ae0b5428bc9208436ed" uuid = "31c24e10-a181-5473-b8eb-7969acd0382f" -version = "0.25.11" +version = "0.25.16" [[DocStringExtensions]] deps = ["LibGit2"] @@ -229,9 +233,9 @@ version = "0.8.5" [[Documenter]] deps = ["ANSIColoredPrinters", "Base64", "Dates", "DocStringExtensions", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"] -git-tree-sha1 = "350dced36c11f794c6c4da5dc6493ec894e50c16" +git-tree-sha1 = "fe0bc46b27cd3413df55859152fd70e50744025f" uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -version = "0.27.5" +version = "0.27.6" [[Downloads]] deps = ["ArgTools", "LibCURL", "NetworkOptions"] @@ -239,9 +243,9 @@ uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" [[EarCut_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "92d8f9f208637e8d2d28c664051a00569c01493d" +git-tree-sha1 = "3f3a2501fa7236e9b911e0f7a588c657e822bb6d" uuid = "5ae413db-bbd1-5e63-b57d-d24a61df00f5" -version = "2.1.5+1" +version = "2.2.3+0" [[EllipsisNotation]] deps = ["ArrayInterface"] @@ -268,10 +272,10 @@ uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" version = "0.4.1" [[FFMPEG_jll]] -deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "LibVPX_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"] -git-tree-sha1 = "3cc57ad0a213808473eafef4845a74766242e05f" +deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"] +git-tree-sha1 = "d8a578692e3077ac998b50c0217dfd67f21d1e5f" uuid = "b22a6f82-2f65-5046-a5b2-351ab43fb4e5" -version = "4.3.1+4" +version = "4.4.0+0" [[FastBroadcast]] deps = ["LinearAlgebra"] @@ -292,15 +296,15 @@ version = "0.3.0" [[FileIO]] deps = ["Pkg", "Requires", "UUIDs"] -git-tree-sha1 = "937c29268e405b6808d958a9ac41bfe1a31b08e7" +git-tree-sha1 = "3c041d2ac0a52a12a27af2782b34900d9c3ee68c" uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" -version = "1.11.0" +version = "1.11.1" [[FillArrays]] deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"] -git-tree-sha1 = "7c365bdef6380b29cfc5caaf99688cd7489f9b87" +git-tree-sha1 = "caf289224e622f518c9dbfe832cdafa17d7c80a6" uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" -version = "0.12.2" +version = "0.12.4" [[FiniteDiff]] deps = ["ArrayInterface", "LinearAlgebra", "Requires", "SparseArrays", "StaticArrays"] @@ -316,9 +320,9 @@ version = "0.8.4" [[Fontconfig_jll]] deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"] -git-tree-sha1 = "35895cf184ceaab11fd778b4590144034a167a2f" +git-tree-sha1 = "21efd19106a55620a188615da6d3d06cd7f6ee03" uuid = "a3f928ae-7b40-5064-980b-68af3947d34b" -version = "2.13.1+14" +version = "2.13.93+0" [[Formatting]] deps = ["Printf"] @@ -334,9 +338,9 @@ version = "0.10.19" [[FreeType2_jll]] deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] -git-tree-sha1 = "cbd58c9deb1d304f5a245a0b7eb841a2560cfec6" +git-tree-sha1 = "87eb71354d8ec1a96d4a7636bd57a7347dde3ef9" uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7" -version = "2.10.1+5" +version = "2.10.4+0" [[FriBidi_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -371,9 +375,9 @@ version = "0.58.1" [[GR_jll]] deps = ["Artifacts", "Bzip2_jll", 
"Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Pkg", "Qt5Base_jll", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = "d59e8320c2747553788e4fc42231489cc602fa50" +git-tree-sha1 = "ef49a187604f865f4708c90e3f431890724e9012" uuid = "d2c73de3-f751-5644-a686-071e5b155ba9" -version = "0.58.1+0" +version = "0.59.0+0" [[GeometricalPredicates]] git-tree-sha1 = "04776c0dc233eafa617b1a52ca58db2338b08836" @@ -398,6 +402,12 @@ git-tree-sha1 = "7bf67e9a481712b3dbe9cb3dac852dc4b1162e02" uuid = "7746bdde-850d-59dc-9ae8-88ece973131d" version = "2.68.3+0" +[[Graphite2_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "344bf40dcab1073aca04aa0df4fb092f920e4011" +uuid = "3b182d85-2403-5c21-9c21-1e1f0cc25472" +version = "1.3.14+0" + [[Grisu]] git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2" uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe" @@ -405,15 +415,21 @@ version = "1.0.2" [[HTTP]] deps = ["Base64", "Dates", "IniFile", "Logging", "MbedTLS", "NetworkOptions", "Sockets", "URIs"] -git-tree-sha1 = "44e3b40da000eab4ccb1aecdc4801c040026aeb5" +git-tree-sha1 = "60ed5f1643927479f845b0135bb369b031b541fa" uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" -version = "0.9.13" +version = "0.9.14" + +[[HarfBuzz_jll]] +deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "Graphite2_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg"] +git-tree-sha1 = "8a954fed8ac097d5be04921d595f741115c1b2ad" +uuid = "2e76f6c2-a576-52d4-95c1-20adfe4de566" +version = "2.8.1+0" [[HostCPUFeatures]] -deps = ["IfElse", "Libdl", "Static"] -git-tree-sha1 = "e86382a874edd4ff47fd1373e03f38302af93345" +deps = ["BitTwiddlingConvenienceFunctions", "IfElse", "Libdl", "Static"] +git-tree-sha1 = "3169c8b31863f9a409be1d17693751314241e3eb" uuid = "3e5b6fbb-0976-4d2c-9146-d79de83f2fb0" -version = "0.1.2" +version = "0.1.4" [[Hwloc]] deps = ["Hwloc_jll"] @@ -488,9 +504,9 @@ version = "1.0.0" [[JLD2]] deps = ["DataStructures", "FileIO", "MacroTools", "Mmap", "Pkg", "Printf", "Reexport", "TranscodingStreams", "UUIDs"] -git-tree-sha1 = "59ee430ac5dc87bc3eec833cc2a37853425750b4" +git-tree-sha1 = "192934b3e2a94e897ce177423fd6cf7bdf464bce" uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" -version = "0.4.13" +version = "0.4.14" [[JLLWrappers]] deps = ["Preferences"] @@ -539,6 +555,12 @@ git-tree-sha1 = "a4b12a1bd2ebade87891ab7e36fdbce582301a92" uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" version = "0.15.6" +[[LayoutPointers]] +deps = ["ArrayInterface", "LinearAlgebra", "ManualMemory", "SIMDTypes", "Static"] +git-tree-sha1 = "d2bda6aa0b03ce6f141a2dc73d0bcb7070131adc" +uuid = "10f19ff3-798f-405d-979b-55457f8fc047" +version = "0.1.3" + [[LibCURL]] deps = ["LibCURL_jll", "MozillaCACerts_jll"] uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" @@ -555,12 +577,6 @@ uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" deps = ["Artifacts", "Libdl", "MbedTLS_jll"] uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" -[[LibVPX_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "12ee7e23fa4d18361e7c2cde8f8337d4c3101bc7" -uuid = "dd192d2f-8180-539f-9fb4-cc70b1dcf69a" -version = "1.10.0+0" - [[Libdl]] uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" @@ -642,24 +658,24 @@ version = "3.4.0" [[Literate]] deps = ["Base64", "IOCapture", "JSON", "REPL"] -git-tree-sha1 = "4a165f8517bbc8e6df7300e2ac3da683e3c099df" +git-tree-sha1 = "bbebc3c14dbfbe76bfcbabf0937481ac84dc86ef" uuid = "98b081ad-f1c9-55d3-8b20-4c87d4299306" -version = "2.9.2" +version = "2.9.3" 
[[LogExpFunctions]] -deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "3d682c07e6dd250ed082f883dc88aee7996bf2cc" +deps = ["ChainRulesCore", "DocStringExtensions", "IrrationalConstants", "LinearAlgebra"] +git-tree-sha1 = "34dc30f868e368f8a17b728a1238f3fcda43931a" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.0" +version = "0.3.3" [[Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" [[LoopVectorization]] -deps = ["ArrayInterface", "DocStringExtensions", "IfElse", "LinearAlgebra", "OffsetArrays", "Polyester", "Requires", "SLEEFPirates", "Static", "StrideArraysCore", "ThreadingUtilities", "UnPack", "VectorizationBase"] -git-tree-sha1 = "dcaae6d7c8223946e2838ea97888e1db4ceb42eb" +deps = ["ArrayInterface", "CPUSummary", "CloseOpenIntervals", "DocStringExtensions", "HostCPUFeatures", "IfElse", "LayoutPointers", "LinearAlgebra", "OffsetArrays", "PolyesterWeave", "Requires", "SLEEFPirates", "Static", "ThreadingUtilities", "UnPack", "VectorizationBase"] +git-tree-sha1 = "d469fcf148475a74c221f14d42ee75da7ccb3b4e" uuid = "bdcacae8-1622-11e9-2a5c-532679323890" -version = "0.12.66" +version = "0.12.73" [[MPC_jll]] deps = ["GMP_jll", "Libdl", "MPFR_jll", "Pkg"] @@ -673,9 +689,9 @@ uuid = "3a97d323-0669-5f0c-9066-3539efd106a3" [[MacroTools]] deps = ["Markdown", "Random"] -git-tree-sha1 = "0fb723cd8c45858c22169b2e42269e53271a6df7" +git-tree-sha1 = "5a5bc6bf062f0f95e62d0fe0a2d99699fed82dd9" uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" -version = "0.5.7" +version = "0.5.8" [[ManualMemory]] git-tree-sha1 = "9cb207b18148b2199db259adfa923b45593fe08e" @@ -703,9 +719,9 @@ version = "0.3.1" [[Missings]] deps = ["DataAPI"] -git-tree-sha1 = "2ca267b08821e86c5ef4376cffed98a46c2cb205" +git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f" uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28" -version = "1.0.1" +version = "1.0.2" [[Mmap]] uuid = "a63ad114-7e13-5084-954f-fe012c677804" @@ -746,15 +762,15 @@ uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" [[NonlinearSolve]] deps = ["ArrayInterface", "FiniteDiff", "ForwardDiff", "IterativeSolvers", "LinearAlgebra", "RecursiveArrayTools", "RecursiveFactorization", "Reexport", "SciMLBase", "Setfield", "StaticArrays", "UnPack"] -git-tree-sha1 = "f2530482ef6447c8ae24c660914436f1ae3917e0" +git-tree-sha1 = "35585534c0c79c161241f2e65e759a11a79d25d0" uuid = "8913a72c-1f9b-4ce2-8d82-65094dcecaec" -version = "0.3.9" +version = "0.3.10" [[OffsetArrays]] deps = ["Adapt"] -git-tree-sha1 = "c0f4a4836e5f3e0763243b8324200af6d0e0f90c" +git-tree-sha1 = "c870a0d713b51e4b49be6432eff0e26a4325afee" uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -version = "1.10.5" +version = "1.10.6" [[Ogg_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -791,9 +807,9 @@ version = "1.4.1" [[OrdinaryDiffEq]] deps = ["Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "DocStringExtensions", "ExponentialUtilities", "FastClosures", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "Logging", "LoopVectorization", "MacroTools", "MuladdMacro", "NLsolve", "Polyester", "RecursiveArrayTools", "Reexport", "SparseArrays", "SparseDiffTools", "StaticArrays", "UnPack"] -git-tree-sha1 = "2867051497b7daf05dd2839523732e137507dcc4" +git-tree-sha1 = "1d4744d7f1af67394c90b338e573000cc76802a1" uuid = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -version = "5.63.3" +version = "5.63.5" [[PCRE_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] @@ -837,21 +853,27 @@ version = "2.0.1" [[PlotUtils]] deps = ["ColorSchemes", "Colors", "Dates", "Printf", "Random", 
"Reexport", "Statistics"] -git-tree-sha1 = "501c20a63a34ac1d015d5304da0e645f42d91c9f" +git-tree-sha1 = "9ff1c70190c1c30aebca35dc489f7411b256cd23" uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" -version = "1.0.11" +version = "1.0.13" [[Plots]] -deps = ["Base64", "Contour", "Dates", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs"] -git-tree-sha1 = "8365fa7758e2e8e4443ce866d6106d8ecbb4474e" +deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs"] +git-tree-sha1 = "2dbafeadadcf7dadff20cd60046bba416b4912be" uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -version = "1.20.1" +version = "1.21.3" [[Polyester]] -deps = ["ArrayInterface", "IfElse", "ManualMemory", "Requires", "Static", "StrideArraysCore", "ThreadingUtilities", "VectorizationBase"] -git-tree-sha1 = "3ced65f2f182e5b5335a573eaa98f883eba3678b" +deps = ["ArrayInterface", "BitTwiddlingConvenienceFunctions", "CPUSummary", "IfElse", "ManualMemory", "Requires", "Static", "StrideArraysCore", "ThreadingUtilities"] +git-tree-sha1 = "21d8a7163d0f3972ade36ca2b5a0e8a27ac96842" uuid = "f517fe37-dbe3-4b94-8317-1923a5111588" -version = "0.3.9" +version = "0.4.4" + +[[PolyesterWeave]] +deps = ["BitTwiddlingConvenienceFunctions", "CPUSummary", "IfElse", "Static", "ThreadingUtilities"] +git-tree-sha1 = "371a19bb801c1b420b29141750f3a34d6c6634b9" +uuid = "1d0040c9-8b98-4ee7-8388-3f51789ca0ad" +version = "0.1.0" [[PreallocationTools]] deps = ["ArrayInterface", "ForwardDiff", "LabelledArrays"] @@ -913,9 +935,9 @@ version = "1.1.2" [[RecipesPipeline]] deps = ["Dates", "NaNMath", "PlotUtils", "RecipesBase"] -git-tree-sha1 = "2a7a2469ed5d94a98dea0e85c46fa653d76be0cd" +git-tree-sha1 = "d4491becdc53580c6dadb0f6249f90caae888554" uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c" -version = "0.3.4" +version = "0.4.0" [[RecursiveArrayTools]] deps = ["ArrayInterface", "ChainRulesCore", "DocStringExtensions", "LinearAlgebra", "RecipesBase", "Requires", "StaticArrays", "Statistics", "ZygoteRules"] @@ -925,9 +947,9 @@ version = "2.17.2" [[RecursiveFactorization]] deps = ["LinearAlgebra", "LoopVectorization", "Polyester", "StrideArraysCore", "TriangularSolve"] -git-tree-sha1 = "9ac54089f52b0d0c37bebca35b9505720013a108" +git-tree-sha1 = "575c18c6b00ce409f75d96fefe33ebe01575457a" uuid = "f2c3362d-daeb-58d1-803e-2bc74f2840b4" -version = "0.2.2" +version = "0.2.4" [[Reexport]] git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" @@ -960,11 +982,16 @@ git-tree-sha1 = "9ba33637b24341aba594a2783a502760aa0bff04" uuid = "fdea26ae-647d-5447-a871-4b548cad5224" version = "3.3.1" +[[SIMDTypes]] +git-tree-sha1 = "330289636fb8107c5f32088d2741e9fd7a061a5c" +uuid = "94e857df-77ce-4151-89e5-788b33177be4" +version = "0.1.0" + [[SLEEFPirates]] deps = ["IfElse", "Static", "VectorizationBase"] -git-tree-sha1 = "bfdf9532c33db35d2ce9df4828330f0e92344a52" +git-tree-sha1 = "947491c30d4293bebb00781bcaf787ba09e7c20d" uuid = "476501e8-09a2-5ece-8869-fb82de89a1fa" -version = "0.6.25" +version = "0.6.26" [[SciMLBase]] deps = ["ArrayInterface", "CommonSolve", 
"ConstructionBase", "Distributed", "DocStringExtensions", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "RecipesBase", "RecursiveArrayTools", "StaticArrays", "Statistics", "Tables", "TreeViews"] @@ -1030,9 +1057,9 @@ version = "1.6.1" [[Static]] deps = ["IfElse"] -git-tree-sha1 = "62701892d172a2fa41a1f829f66d2b0db94a9a63" +git-tree-sha1 = "a8f30abc7c64a39d389680b74e749cf33f872a70" uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3" -version = "0.3.0" +version = "0.3.3" [[StaticArrays]] deps = ["LinearAlgebra", "Random", "Statistics"] @@ -1051,15 +1078,15 @@ version = "1.0.0" [[StatsBase]] deps = ["DataAPI", "DataStructures", "LinearAlgebra", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] -git-tree-sha1 = "fed1ec1e65749c4d96fc20dd13bea72b55457e62" +git-tree-sha1 = "8cbbc098554648c84f79a463c9ff0fd277144b6c" uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" -version = "0.33.9" +version = "0.33.10" [[StatsFuns]] -deps = ["IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"] -git-tree-sha1 = "20d1bb720b9b27636280f751746ba4abb465f19d" +deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"] +git-tree-sha1 = "46d7ccc7104860c38b11966dd1f72ff042f382e4" uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c" -version = "0.9.9" +version = "0.9.10" [[StreamMacros]] deps = ["DiffEqBase", "StaticArrays", "SymEngine"] @@ -1070,16 +1097,16 @@ uuid = "1418a599-1564-4a75-98e9-b890cf8b552f" version = "0.2.0" [[StrideArraysCore]] -deps = ["ArrayInterface", "ManualMemory", "Requires", "ThreadingUtilities", "VectorizationBase"] -git-tree-sha1 = "9ab16bda5fe1212e0af0bea80f1d11096aeb3248" +deps = ["ArrayInterface", "CloseOpenIntervals", "IfElse", "LayoutPointers", "ManualMemory", "Requires", "SIMDTypes", "Static", "ThreadingUtilities"] +git-tree-sha1 = "1258e25e171aec339866f283a11e7d75867e77d7" uuid = "7792a7ef-975c-4747-a70f-980b88e8d1da" -version = "0.1.18" +version = "0.2.4" [[StructArrays]] deps = ["Adapt", "DataAPI", "StaticArrays", "Tables"] -git-tree-sha1 = "000e168f5cc9aded17b6999a560b7c11dda69095" +git-tree-sha1 = "f41020e84127781af49fc12b7e92becd7f5dd0ba" uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a" -version = "0.6.0" +version = "0.6.2" [[SuiteSparse]] deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] @@ -1109,9 +1136,9 @@ version = "1.0.1" [[Tables]] deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"] -git-tree-sha1 = "d0c690d37c73aeb5ca063056283fde5585a41710" +git-tree-sha1 = "1162ce4a6c4b7e31e0e6b14486a6986951c73be9" uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" -version = "1.5.0" +version = "1.5.2" [[Tar]] deps = ["ArgTools", "SHA"] @@ -1119,9 +1146,9 @@ uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" [[Tensors]] deps = ["ForwardDiff", "LinearAlgebra", "SIMD", "Statistics"] -git-tree-sha1 = "b0b967686e9b48f024ee967f98cbffcd4b0ff6cc" +git-tree-sha1 = "46662e28c5d32a77f21a57d2263aa3bf8d1edee4" uuid = "48a634ad-e948-5137-8d70-aa71f2a747f4" -version = "1.6.0" +version = "1.6.1" [[Test]] deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] @@ -1146,10 +1173,10 @@ uuid = "a2a6695c-b41b-5b7d-aed9-dbfdeacea5d7" version = "0.3.0" [[TriangularSolve]] -deps = ["CloseOpenIntervals", "IfElse", "LinearAlgebra", "LoopVectorization", "Polyester", "Static", "VectorizationBase"] -git-tree-sha1 = "cb80cf5e0dfb1aedd4c6dbca09b5faaa9a300c62" +deps = ["CloseOpenIntervals", "IfElse", "LayoutPointers", 
"LinearAlgebra", "LoopVectorization", "Polyester", "Static", "VectorizationBase"] +git-tree-sha1 = "1eed054a58d9332adc731103fe47dad2ad1a0adf" uuid = "d5829a12-d9aa-46ab-831f-fb7c9ab06edf" -version = "0.1.3" +version = "0.1.5" [[URIs]] git-tree-sha1 = "97bbe755a53fe859669cd907f2d96aee8d2c1355" @@ -1169,10 +1196,10 @@ version = "1.0.2" uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" [[VectorizationBase]] -deps = ["ArrayInterface", "CPUSummary", "HostCPUFeatures", "Hwloc", "IfElse", "Libdl", "LinearAlgebra", "Static"] -git-tree-sha1 = "777a3ca87b8d28eaa42bc58b125ab6ba08fb417e" +deps = ["ArrayInterface", "CPUSummary", "HostCPUFeatures", "Hwloc", "IfElse", "LayoutPointers", "Libdl", "LinearAlgebra", "SIMDTypes", "Static"] +git-tree-sha1 = "43c605e008ac67adb672ef08721d4720dfe2ad41" uuid = "3d5dd08c-fd9d-11e8-17fa-ed2836048c2f" -version = "0.20.37" +version = "0.21.7" [[VertexSafeGraphs]] deps = ["LightGraphs"] @@ -1365,16 +1392,16 @@ uuid = "700de1a5-db45-46bc-99cf-38207098b444" version = "0.2.1" [[libass_jll]] -deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] -git-tree-sha1 = "acc685bcf777b2202a904cdcb49ad34c2fa1880c" +deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "HarfBuzz_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "5982a94fcba20f02f42ace44b9894ee2b140fe47" uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0" -version = "0.14.0+4" +version = "0.15.1+0" [[libfdk_aac_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "7a5780a0d9c6864184b3a2eeeb833a0c871f00ab" +git-tree-sha1 = "daacc84a041563f965be61859a36e17c4e4fcd55" uuid = "f638f0a6-7fb0-5443-88ba-1cc74229b280" -version = "0.1.6+4" +version = "2.0.2+0" [[libpng_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] @@ -1398,15 +1425,15 @@ uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" [[x264_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "d713c1ce4deac133e3334ee12f4adff07f81778f" +git-tree-sha1 = "4fea590b89e6ec504593146bf8b988b2c00922b2" uuid = "1270edf5-f2f9-52d2-97e9-ab00b5d0237a" -version = "2020.7.14+2" +version = "2021.5.5+0" [[x265_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "487da2f8f2f0c8ee0e83f39d13037d6bbf0a45ab" +git-tree-sha1 = "ee567a171cce03570d77ad3a43e90218e38937a9" uuid = "dfaa095f-4041-5dcd-9319-2fabd8486b76" -version = "3.0.0+3" +version = "3.5.0+0" [[xkbcommon_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll", "Wayland_protocols_jll", "Xorg_libxcb_jll", "Xorg_xkeyboard_config_jll"] diff --git a/docs/makeimages.jl b/docs/makeimages.jl index 6a856910..b02f4491 100644 --- a/docs/makeimages.jl +++ b/docs/makeimages.jl @@ -48,7 +48,7 @@ ctx = regularTriangularGrid((100,100), [0.0,0.0],[2π,2π]) pred = (x,y) -> ((x[1] - y[1]) % 2π) < 1e-9 && ((x[2] - y[2]) % 2π) < 1e-9 bdata = BoundaryData(ctx,pred) -id2 = one(Tensors.Tensor{2,2}) # 2D identity tensor +id2 = one(Tensor{2,2}) # 2D identity tensor cgfun = x -> 0.5*(id2 + dott(inv(DstandardMap(x)))) K = assembleStiffnessMatrix(ctx,cgfun,bdata=bdata) diff --git a/docs/src/fem.md b/docs/src/fem.md index cf06ebe4..089cc3d2 100644 --- a/docs/src/fem.md +++ b/docs/src/fem.md @@ -162,7 +162,8 @@ u[20] = 2.0; u[38] = 3.0; u[56] = 4.0 plot_u(ctx, u, 200, 200, bdata=bdata, colorbar=:none) ``` -To apply boundary conditions to a stiffness/mass matrix, use the `applyBCS` function. 
Note that `assembleStiffnessMatrix` and `assembleMassMatrix` take a `bdata` argument that does this internally. +To apply boundary conditions to a stiffness/mass matrix, use the `applyBCS` function. Note +that both `assemble` functions take a `bdata` argument that does this internally. ## Plotting and Videos @@ -172,8 +173,9 @@ The simplest way to plot is using the [`plot_u`](@ref) function. Plots and video ## Parallelisation -Many of the plotting functions support parallelism internally. -Tensor fields can be constructed in parallel, and then passed to [`assembleStiffnessMatrix`](@ref). For an example that does this, see +Many of the plotting functions support parallelism internally. Tensor fields can be +constructed in parallel, and then passed to [`assemble(Stiffness(), _)`](@ref). For an +example that does this, see TODO: Add this example ## FEM-API @@ -185,8 +187,7 @@ CurrentModule = CoherentStructures ### Stiffness and Mass Matrices ```@docs -assembleStiffnessMatrix -assembleMassMatrix +assemble adaptiveTOCollocationStiffnessMatrix ``` diff --git a/src/CoherentStructures.jl b/src/CoherentStructures.jl index 2aa6f0d8..48cf04da 100644 --- a/src/CoherentStructures.jl +++ b/src/CoherentStructures.jl @@ -2,42 +2,41 @@ module CoherentStructures # use standard libraries using LinearAlgebra -using ProgressMeter +using LinearAlgebra: checksquare +import ProgressMeter using SparseArrays +using SparseArrays: nzvalview, dropzeros! using Distributed using SharedArrays: SharedArray using Statistics: mean # import data type packages -import StaticArrays -using StaticArrays: SVector, @SVector, SArray, SMatrix, @SMatrix -import Tensors -using Tensors: Vec, Tensor, SymmetricTensor -import AxisArrays +using StaticArrays: SVector, @SVector, SMatrix, @SMatrix +using Tensors: Vec, Tensor, SymmetricTensor, basevec, dott, tdot, otimes, symmetric using AxisArrays: AxisArray, ClosedInterval, axisvalues -import DiffEqBase -import OrdinaryDiffEq -const ODE = OrdinaryDiffEq -import Distances -const Dists = Distances -import NearestNeighbors -const NN = NearestNeighbors -import Interpolations -const ITP = Interpolations +using DiffEqBase: DiffEqBase, initialize!, isconstant, update_coefficients!, @.. 
+using OrdinaryDiffEq: OrdinaryDiffEq, ODEProblem, ODEFunction, ContinuousCallback, + terminate!, solve, Tsit5, BS5, OrdinaryDiffEqNewtonAlgorithm, DEFAULT_LINSOLVE, + alg_order, OrdinaryDiffEqMutableCache, alg_cache, @muladd, perform_step!, @unpack, + unwrap_alg, is_mass_matrix_alg + +using Distances: Distances, PreMetric, SemiMetric, Metric, Euclidean, PeriodicEuclidean, + pairwise, pairwise!, colwise, colwise!, result_type + +using NearestNeighbors: BallTree, KDTree, inrange, knn, MinkowskiMetric + +using Interpolations: Interpolations, LinearInterpolation, CubicSplineInterpolation, + interpolate, scale, BSpline, Linear, Cubic, Natural, OnGrid, Free # import linear algebra related packages -import LinearMaps -const LMs = LinearMaps -import IterativeSolvers -import ArnoldiMethod +using LinearMaps: LinearMap +using IterativeSolvers: cg +using ArnoldiMethod: partialschur, partialeigen # import geometry related packages -import GeometricalPredicates -const GP = GeometricalPredicates -import VoronoiDelaunay -const VD = VoronoiDelaunay - +using GeometricalPredicates: GeometricalPredicates, Point, AbstractPoint2D, Point2D, getx, gety +using VoronoiDelaunay: DelaunayTessellation2D, findindex, isexternal, max_coord, min_coord # Ferrite import Ferrite diff --git a/src/FEMassembly.jl b/src/FEMassembly.jl index 67e70928..20563482 100644 --- a/src/FEMassembly.jl +++ b/src/FEMassembly.jl @@ -1,11 +1,14 @@ # Strongly inspired by an example provided on Ferrite's github page, modified and # extended by Nathanael Schilling +struct Stiffness end +struct Mass end + #Works in n=2 and n=3 -tensorIdentity(x::Vec{dim}, _, p) where {dim} = one(SymmetricTensor{2,dim,Float64,3(dim-1)}) +tensorIdentity(x::Vec{dim,T}, _, p) where {dim,T} = one(SymmetricTensor{2,dim,T}) """ - assembleStiffnessMatrix(ctx, A, p=nothing; bdata=BoundaryData()) + assemble(Stiffness(), ctx; A=Id, p=nothing, bdata=BoundaryData()) Assemble the stiffness-matrix for a symmetric bilinear form ```math @@ -13,28 +16,30 @@ a(u,v) = \\int \\nabla u(x)\\cdot A(x)\\nabla v(x)f(x) dx. ``` The integral is approximated using numerical quadrature. `A` is a function that returns a `SymmetricTensor{2,dim}` object and must have one of the following signatures: - * `A(x::Vector{Float64})`; - * `A(x::Vec{dim})`; - * `A(x::Vec{dim}, index::Int, p)`. Here, `x` is equal to `ctx.quadrature_points[index]`, - and `p` is the one passed to `assembleStiffnessMatrix`. + +* `A(x::Vector{Float64})`; +* `A(x::Vec{dim})`; +* `A(x::Vec{dim}, index::Int, p)`. Here, `x` is equal to `ctx.quadrature_points[index]`, + and `p` is some parameter, think of some precomputed object that is indexed via `index`. The ordering of the result is in dof order, except that boundary conditions from `bdata` are applied. The default is natural (homogeneous Neumann) boundary conditions. 
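# Example

A minimal sketch, not part of this patch: it assumes `ctx` is an existing `GridContext`
and `bdata` its `BoundaryData`, and uses a constant identity tensor as a placeholder for
a real diffusion tensor field.

```
using Tensors: Vec, SymmetricTensor

A(x::Vec{2}) = one(SymmetricTensor{2,2})   # placeholder: constant identity tensor
K = assemble(Stiffness(), ctx; A = A, bdata = bdata)
```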
""" -function assembleStiffnessMatrix(ctx::GridContext, A=tensorIdentity, p=nothing; bdata=BoundaryData()) +function assemble(::Stiffness, ctx::GridContext; A=tensorIdentity, p=nothing, bdata=BoundaryData()) if A === tensorIdentity - return _assembleStiffnessMatrix(ctx, A, p, bdata=bdata) + return _assembleStiffnessMatrix(ctx, A, p, bdata = bdata) elseif !isempty(methods(A, (Vec,))) return _assembleStiffnessMatrix(ctx, (qp, i, p) -> A(qp), p, bdata=bdata) elseif !isempty(methods(A, (Vec, Int, Any))) return _assembleStiffnessMatrix(ctx, (qp, i, p) -> A(qp, i, p), p, bdata=bdata) elseif !isempty(methods(A, (Vector{Float64},))) - return _assembleStiffnessMatrix(ctx, (qp, i, p) -> A(Vector{Float64}(qp)), p, bdata=bdata) + return _assembleStiffnessMatrix(ctx, (qp, i, p) -> A(convert(Vector{Float64}, qp)), p, bdata=bdata) end - error("Function parameter A does not accept types supported by assembleStiffnessMatrix") + error("function argument `A` does not admit any of the accepted signatures") end +@deprecate assembleStiffnessMatrix(ctx::GridContext, A=tensorIdentity, p=nothing; bdata=BoundaryData()) assemble(Stiffness(), ctx; A=A, p=p, bdata=bdata) -function _assembleStiffnessMatrix(ctx::GridContext, A, p; bdata=BoundaryData()) +function _assembleStiffnessMatrix(ctx, A, p; bdata=BoundaryData()) cv = FEM.CellScalarValues(ctx.qr, ctx.ip, ctx.ip_geom) dh = ctx.dh K = FEM.create_sparsity_pattern(dh) @@ -70,7 +75,7 @@ end """ - assembleMassMatrix(ctx; bdata=BoundaryData(), lumped=false) + assemble(Mass(), ctx; bdata=BoundaryData(), lumped=false) Assemble the mass matrix ```math @@ -86,10 +91,10 @@ Returns a lumped mass matrix if `lumped=true`. # Example ``` ctx.mass_weights = map(f, ctx.quadrature_points) -M = assembleMassMatrix(ctx) +M = assemble(Mass(), ctx) ``` """ -function assembleMassMatrix(ctx::GridContext; bdata=BoundaryData(), lumped=false) +function assemble(::Mass, ctx::GridContext; bdata=BoundaryData(), lumped=false) cv = FEM.CellScalarValues(ctx.qr, ctx.ip, ctx.ip_geom) dh = ctx.dh M = FEM.create_sparsity_pattern(dh) @@ -123,12 +128,12 @@ function assembleMassMatrix(ctx::GridContext; bdata=BoundaryData(), lumped=false M = applyBCS(ctx, M, bdata) return lumped ? spdiagm(0 => dropdims(reduce(+, M; dims=1); dims=1)) : M end +@deprecate assembleMassMatrix(ctx::GridContext; bdata=BoundaryData(), lumped=false) assemble(Mass(), ctx; bdata=bdata, lumped=lumped) """ - getQuadPointsPoints(ctx) + getQuadPoints(ctx) -Compute the coordinates of all quadrature points on a grid. -Helper function. +Compute the coordinates of all quadrature points on a grid. Helper function. 
""" function getQuadPoints(ctx::GridContext{dim}) where {dim} cv = FEM.CellScalarValues(ctx.qr, ctx.ip_geom) diff --git a/src/TO.jl b/src/TO.jl index 5bcc1f5e..e0eaa05e 100644 --- a/src/TO.jl +++ b/src/TO.jl @@ -50,11 +50,11 @@ function nonAdaptiveTOCollocation( #Calculate the integral of shape function in the domain (dof order) shape_function_weights_domain_original = undoBCS(ctx_domain, - vec(sum(assembleMassMatrix(ctx_domain, bdata=bdata_domain), dims=1)), + vec(sum(assemble(Mass(), ctx_domain, bdata=bdata_domain), dims=1)), bdata_domain) #Calculate the integral of shape functions in the codomain (dof order) #Do not include boundary conditions here, as we end up summing over this later - shape_function_weights_codomain = vec(sum(assembleMassMatrix(ctx_codomain),dims=1)) + shape_function_weights_codomain = vec(sum(assemble(Mass(), ctx_codomain), dims=1)) #This variable will hold approximate pullback (in the measure sense) #of shape_function_weights_codomain to domain via finv @@ -102,23 +102,28 @@ function nonAdaptiveTOCollocation( end """ - adaptiveTOCollocationStiffnessMatrix(ctx,flow_maps,times=nothing; [quadrature_order, on_torus,on_cylinder, LL, UR, bdata, volume_preserving=true,flow_map_mode=0] ) + adaptiveTOCollocationStiffnessMatrix(ctx, flow_maps, times=nothing; [quadrature_order, on_torus,on_cylinder, LL, UR, bdata, volume_preserving=true, flow_map_mode=0] ) -Calculate the matrix-representation of the bilinear form ``a(u,v) = 1/N \\sum_n^N a_1(I_hT_nu,I_hT_nv)`` where -``I_h`` is pointwise interpolation of the grid obtained by doing Delaunay triangulation on images of grid points from ctx -and ``T_n`` is the Transfer-operator for ``x \\mapsto flow_maps(x,times)[n]`` and ``a_1`` is the weak form of the Laplacian on the codomain. Moreover, -``N`` in the equation above is equal to `length(times)` and ``t_n`` ranges over the elements of `times`. +Calculate the matrix-representation of the bilinear form ``a(u,v) = 1/N \\sum_n^N a_1(I_hT_nu,I_hT_nv)`` +where ``I_h`` is pointwise interpolation of the grid obtained by doing Delaunay +triangulation on images of grid points from `ctx` and ``T_n`` is the transfer operator for +``x \\mapsto flow_maps(x, times)[n]`` and ``a_1`` is the weak form of the Laplacian on the +codomain. Moreover, ``N`` in the equation above is equal to `length(times)` and ``t_n`` +ranges over the elements of `times`. -If `times==nothing`, take ``N=1`` above and use the map ``x \\mapsto flow_maps(x)` instead of the version with `t_n`. +If `times==nothing`, take ``N=1`` above and use the map ``x \\mapsto flow_maps(x)`` instead +of the version with `t_n`. -If `on_torus` is true, the Delaunay Triangulation is done on the torus. -If `on_cylinder` is true, then triangulation is done on cylinder (periodic) x. In both of these cases we require `bdata` for boundary information -on the original domain as well as `LL` and `UR` as lower-left and upper-right corners of the image. +If `on_torus` is true, the Delaunay triangulation is done on the torus. +If `on_cylinder` is true, then triangulation is done on an x-periodic cylinder. In both of +these cases we require `bdata` for boundary information on the original domain as well as +`LL` and `UR` as lower-left and upper-right corners of the image. If `volume_preserving == false`, add a volume_correction term to ``a_1`` (See paper by Froyland & Junge). -If `flow_map_mode==0`, apply flow map to nodal basis function coordinates. 
-If `flow_map_mode==1`, apply flow map to nodal basis function index number (allows for precomputed trajectories). +If `flow_map_mode=0`, apply flow map to nodal basis function coordinates. +If `flow_map_mode=1`, apply flow map to nodal basis function index number (allows for +precomputed trajectories). """ function adaptiveTOCollocationStiffnessMatrix(ctx::GridContext{2}, flow_maps, times=nothing; quadrature_order=default_quadrature_order, @@ -183,58 +188,52 @@ function adaptiveTOCollocationStiffnessMatrix(ctx::GridContext{2}, flow_maps, ti #the old grid new_density_nodevals = new_density_bcdofvals while length(new_density_nodevals) != new_ctx.n - push!(new_density_nodevals,0.0) + push!(new_density_nodevals, 0.0) end - new_ctx.mass_weights = [evaluate_function_from_node_or_cellvals(new_ctx,new_density_nodevals,q) - for q in new_ctx.quadrature_points ] + new_ctx.mass_weights = [evaluate_function_from_node_or_cellvals(new_ctx, new_density_nodevals, q) + for q in new_ctx.quadrature_points] end - I, J, V = findnz(assembleStiffnessMatrix(new_ctx,bdata=new_bdata)) + I, J, V = findnz(assemble(Stiffness(), new_ctx, bdata=new_bdata)) I .= translation_table[I] J .= translation_table[J] n = ctx.n - length(bdata.periodic_dofs_from) - push!(As,sparse(I,J,V,n,n)) + push!(As, sparse(I, J, V, n, n)) end return mean(As) end #Reordering in the periodic case is slightly more tricky -function bcdofNewToBcdofOld( - old_ctx::GridContext{dim},bdata::BoundaryData, - new_ctx::GridContext{dim},new_bdata::BoundaryData,K - ) where {dim} - - I, J ,V = findnz(K) - - #Here I,J are in pdof order for new_ctx - - bcdof_to_node_new = bcdof_to_node(new_ctx,new_bdata) - node_to_bcdof_old = node_to_bcdof(old_ctx,bdata) - I .= node_to_bcdof_old[bcdof_to_node_new[I]] - J .= node_to_bcdof_old[bcdof_to_node_new[J]] - - old_pdof_n = old_ctx.n - length(bdata.periodic_dofs_from) - - return sparse(I,J,V,old_pdof_n,old_pdof_n) +function bcdofNewToBcdofOld(old_ctx::GridContext{dim}, bdata::BoundaryData, + new_ctx::GridContext{dim}, new_bdata::BoundaryData, K) where {dim} + I, J ,V = findnz(K) + + # Here I,J are in pdof order for new_ctx + bcdof_to_node_new = bcdof_to_node(new_ctx, new_bdata) + node_to_bcdof_old = node_to_bcdof(old_ctx, bdata) + I .= node_to_bcdof_old[bcdof_to_node_new[I]] + J .= node_to_bcdof_old[bcdof_to_node_new[J]] + old_pdof_n = old_ctx.n - length(bdata.periodic_dofs_from) + return sparse(I, J, V, old_pdof_n, old_pdof_n) end -function node_to_bcdof(ctx::GridContext{dim},bdata::BoundaryData) where {dim} - n_nodes = ctx.n - length(bdata.periodic_dofs_from) - bdata_table = BCTable(ctx,bdata) - return bdata_table[ctx.node_to_dof] +function node_to_bcdof(ctx::GridContext{dim}, bdata::BoundaryData) where {dim} + n_nodes = ctx.n - length(bdata.periodic_dofs_from) + bdata_table = BCTable(ctx, bdata) + return bdata_table[ctx.node_to_dof] end function bcdof_to_node(ctx::GridContext{dim},bdata::BoundaryData) where {dim} - n_nodes = ctx.n - length(bdata.periodic_dofs_from) - bdata_table = BCTable(ctx,bdata) - result = zeros(Int,n_nodes) - for i in 1:ctx.n - if result[bdata_table[ctx.node_to_dof[i]]] == 0 - result[bdata_table[ctx.node_to_dof[i]]] = i - end + n_nodes = ctx.n - length(bdata.periodic_dofs_from) + bdata_table = BCTable(ctx,bdata) + result = zeros(Int,n_nodes) + for i in 1:ctx.n + if iszero(result[bdata_table[ctx.node_to_dof[i]]]) + result[bdata_table[ctx.node_to_dof[i]]] = i end - return result + end + return result end @@ -274,8 +273,8 @@ function adaptiveTOFutureGrid(ctx::GridContext{dim}, flow_map; #Do volume 
corrections #All values are in node order for new_ctx, which is the same as bcdof order for ctx2 n_nodes = length(new_nodes_in_bcdof_order) - vols_new = sum(assembleMassMatrix(new_ctx, bdata=new_bdata), dims=1)[1, node_to_bcdof(new_ctx, new_bdata)[1:n_nodes]] - vols_old = sum(assembleMassMatrix(ctx, bdata=bdata), dims=1)[1, 1:n_nodes] + vols_new = sum(assemble(Mass(), new_ctx, bdata=new_bdata), dims=1)[1, node_to_bcdof(new_ctx, new_bdata)[1:n_nodes]] + vols_old = sum(assemble(Mass(), ctx, bdata=bdata), dims=1)[1, 1:n_nodes] return new_ctx, new_bdata, vols_new ./ vols_old end @@ -319,12 +318,12 @@ function adaptiveTOCollocation(ctx::GridContext{dim}, flow_map; throw(AssertionError("Invalid projection_method")) end if volume_preserving - L = sparse(I, npoints, npoints)[node_to_bcdof(ctx,bdata)[bcdof_to_node(ctx_new,bdata_new)],:] + L = sparse(I, npoints, npoints)[node_to_bcdof(ctx, bdata)[bcdof_to_node(ctx_new,bdata_new)],:] result = ALPHA_bc*L else volume_change_pdof = volume_change[bcdof_to_node(ctx,bdata)] - K = assembleStiffnessMatrix(ctx,bdata=bdata) - M = assembleMassMatrix(ctx,bdata=bdata) + K = assemble(Stiffness(), ctx, bdata = bdata) + M = assemble(Mass(), ctx, bdata = bdata) volume_change_pdof = (M - 1e-2K)\(M*volume_change_pdof) volume_change = volume_change_pdof[node_to_bcdof(ctx,bdata)] diff --git a/src/advection_diffusion.jl b/src/advection_diffusion.jl index 20785d1c..8fb52f61 100644 --- a/src/advection_diffusion.jl +++ b/src/advection_diffusion.jl @@ -33,7 +33,7 @@ function FEM_heatflow(odefun!, ctx::GridContext, tspan, κ::Real, p=nothing, bda end function implicitEulerStepFamily(ctx::GridContext, sol, tspan, κ, δ; factor=true, bdata=BoundaryData()) - M = assembleMassMatrix(ctx, bdata=bdata) + M = assemble(Mass(), ctx, bdata = bdata) nnzM = nonzeros(M) n = size(M, 1) scale = -step(tspan) * κ @@ -51,10 +51,10 @@ function implicitEulerStepFamily(ctx::GridContext, sol, tspan, κ, δ; factor=tr end else matmul = let A=K - (u, v) -> copyto!(u, IterativeSolvers.cg(A, v)) + (u, v) -> copyto!(u, cg(A, v)) end end - LMs.LinearMap(matmul, matmul, n, n; issymmetric=true, ishermitian=true, isposdef=true) * M + LinearMap(matmul, matmul, n, n; issymmetric=true, ishermitian=true, isposdef=true) * M end return prod(reverse(P)) end @@ -66,10 +66,10 @@ Single step with implicit Euler method. 
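# Example

A hedged sketch of a single step of size `edt = 0.01`; it assumes `ctx`, a dof-ordered
coefficient vector `u0`, and a diffusion-tensor function `Afun` (as accepted by
`assemble(Stiffness(), _)`) are already defined.

```
M = assemble(Mass(), ctx)
K = assemble(Stiffness(), ctx; A = Afun)
u1 = ADimplicitEulerStep(ctx, u0, 0.01; M = M, K = K)
```

Passing precomputed `M` and `K` avoids reassembling them on every step.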
""" function ADimplicitEulerStep(ctx::GridContext, u::AbstractVector, edt::Real; Afun=nothing, q=nothing, M=nothing, K=nothing) if M === nothing - M = assembleMassMatrix(ctx) + M = assemble(Mass(), ctx) end if K === nothing - K = assembleStiffnessMatrix(ctx, Afun, q) + K = assemble(Stiffness(), ctx, A = Afun, p = q) end return (M - edt * K) \ (M * u) end @@ -89,27 +89,27 @@ function advect_serialized_quadpoints(ctx::GridContext, tspan, odefun!, p=nothin u0 = setup_fd_quadpoints_serialized(ctx, δ) p2 = Dict("ctx" => ctx, "p" => p) - prob = ODE.ODEProblem{true}( + prob = ODEProblem{true}( (du, u, p, t) -> large_rhs(odefun!, du, u, p, t), u0, (tspan[1], tspan[end]), p2) - return ODE.solve(prob, solver, abstol=tolerance, reltol=tolerance) + return solve(prob, solver, abstol=tolerance, reltol=tolerance) end function stiffnessMatrixTimeT(ctx, sol, t, δ=1e-9; bdata=BoundaryData()) if t < 0 - return assembleStiffnessMatrix(ctx, bdata=bdata) + return assemble(Stiffness(), ctx, bdata = bdata) end p = sol(t) function Afun(x, index, p) Df = Tensor{2,2}( (p[(8*(index-1) + 1):(8*(index-1) + 4)] - p[ (8*(index-1)+5):(8*(index-1) + 8)])/(2δ) ) - return Tensors.dott(inv(Df)) + return dott(inv(Df)) end - return assembleStiffnessMatrix(ctx, Afun, p, bdata=bdata) + return assemble(Stiffness(), ctx, A = Afun, p = p, bdata = bdata) end @inline function large_rhs(odefun!, du, u, p, t) @@ -162,12 +162,12 @@ function extendedRHSStiff!(A, u, p, t) (u[(4*(i-1) +1):(4*i)] - u[(4*n_quadpoints +1 +4*(i-1)):(4*n_quadpoints+4*i)])/2δ ) end - invDiffTensors = Tensors.dott.(inv.(DF)) + invDiffTensors = dott.(inv.(DF)) ctx = p["ctx"] κ = p["κ"] - K = assembleStiffnessMatrix(ctx, PCDiffTensors, invDiffTensors) + K = assemble(Stiffness(), ctx, A = PCDiffTensors, p = invDiffTensors) Is, Js, Vs = findnz(K) - M = assembleMassMatrix(ctx, lumped=true) + M = assemble(Mass(), ctx, lumped=true) for index in eachindex(Is, Js, Vs) i = Is[index] j = Js[index] diff --git a/src/boundaryconditions.jl b/src/boundaryconditions.jl index f5eada25..1a0e8002 100644 --- a/src/boundaryconditions.jl +++ b/src/boundaryconditions.jl @@ -360,10 +360,10 @@ function identifyPoints(ctx::GridContext{dim}, predicate::Distances.Metric) wher end TOL = 1e-12 #TODO: set this somewhere globally - S = NN.BallTree(l, predicate) + S = BallTree(l, predicate) for index in 1:ctx.n - res = NN.inrange(S, getDofCoordinates(ctx, index), TOL, true) + res = inrange(S, getDofCoordinates(ctx, index), TOL, true) if res[1] != index push!(identify_from, index) push!(identify_to, res[1]) diff --git a/src/diffusion_operators.jl b/src/diffusion_operators.jl index 55b4d323..8179bdae 100644 --- a/src/diffusion_operators.jl +++ b/src/diffusion_operators.jl @@ -72,7 +72,7 @@ function DM_heatflow( p0, sp_method::SparsificationMethod, kernel; - metric = Dists.Euclidean(), + metric = Euclidean(), ) data = pmap(flow_fun, p0; batch_size = ceil(sqrt(length(p0)))) sparse_diff_op_family(data, sp_method, kernel; metric = metric) @@ -104,9 +104,9 @@ function sparse_diff_op_family( data::AbstractArray{<:AbstractVector{<:SVector}}, sp_method::SparsificationMethod, kernel = gaussian(), - op_reduce = (P -> prod(reverse(LMs.LinearMap.(P)))); + op_reduce = (P -> prod(reverse(LinearMap.(P)))); α = 1, - metric = Dists.Euclidean(), + metric = Euclidean(), verbose::Bool = false, ) N = length(data) # number of trajectories @@ -114,7 +114,7 @@ function sparse_diff_op_family( q = axes(first(data), 1) # time axis all(d -> axes(d, 1) == q, data) || throw("inhomogeneous trajectory lengths") length(q) == 0 && 
throw("trajectories have length 0") - P = Distributed.pmap(q) do t + P = pmap(q) do t Pt = sparse_diff_op(getindex.(data, t), sp_method, kernel; α=α, metric=metric) verbose && println("time step $t/$q done") Pt @@ -143,10 +143,10 @@ function sparse_diff_op( sp_method::SparsificationMethod, kernel = gaussian(); α = 1.0, - metric = Dists.Euclidean(), + metric = Euclidean(), ) P = spdist(data, sp_method, metric) - N = LinearAlgebra.checksquare(P) + N = checksquare(P) if sp_method isa Neighborhood # P corresponds to the adjacency matrix if kernel != Base.one # otherwise no need to change entries rows = rowvals(P) @@ -190,7 +190,7 @@ i.e., return ``a_{ij}:=a_{ij}/q_i^{\\alpha}/q_j^{\\alpha}``, where end @inline function lrmul!(A::SparseMatrixCSC, qₑ) - nzv = SparseArrays.nzvalview(A) + nzv = nzvalview(A) rv = rowvals(A) @inbounds for col in 1:size(A, 2), p in nzrange(A, col) nzv[p] = qₑ[rv[p]] * nzv[p] * qₑ[col] @@ -202,23 +202,6 @@ end return A end -# compat -if VERSION < v"1.2.0" - function LinearAlgebra.ldiv!(D::Diagonal, A::SparseMatrixCSC) - # @assert !has_offset_axes(A) - if A.m != length(D.diag) - throw(DimensionMismatch("diagonal matrix is $(length(D.diag)) by $(length(D.diag)) but right hand side has $(A.m) rows")) - end - nonz = SparseArrays.nzvalview(A) - Arowval = rowvals(A) - d = D.diag - @inbounds for col in 1:size(A, 2), p in nzrange(A, col) - nonz[p] = d[Arowval[p]] \ nonz[p] - end - A - end -end - """ row_normalize!(A) @@ -254,9 +237,9 @@ Compute the stationary distribution for a Markov transition operator. is given by a function. """ function stationary_distribution(P) - decomp, history = ArnoldiMethod.partialschur(P; nev=1, tol=0.0) + decomp, history = partialschur(P; nev=1, tol=0.0) history.converged || error("computation of stationary distribution failed") - λs, X = ArnoldiMethod.partialeigen(decomp) + λs, X = partialeigen(decomp) # λs, X = Arpack.eigs(P; nev = 1, ncv = 50, maxiter = maxiter) Π = dropdims(real(X), dims = 2) # stationary distribution ext = extrema(Π) @@ -267,10 +250,10 @@ function stationary_distribution(P) return Π end -@inline function L_mul_Lt(L::LMs.LinearMap, Π) +@inline function L_mul_Lt(L::LinearMap, Π) Πsqrt = Diagonal(sqrt.(Π)) Πinv = Diagonal(inv.(Π)) - return LMs.LinearMap( + return LinearMap( Πsqrt * L * Πinv * transpose(L) * Πsqrt; issymmetric = true, ishermitian = true, @@ -279,8 +262,8 @@ end end @inline function L_mul_Lt(L::AbstractMatrix, Π) L .= sqrt.(Π) .* L .* permutedims(inv.(sqrt.(Π))) - LMap = LMs.LinearMap(L) - return LMs.LinearMap( + LMap = LinearMap(L) + return LinearMap( LMap * transpose(LMap); issymmetric = true, ishermitian = true, @@ -296,16 +279,16 @@ Compute the (time-coupled) diffusion coordinate matrix `Ψ` and the coordinate w diffusion coordinates to be computed. 
""" function diffusion_coordinates(P, n_coords) - N = LinearAlgebra.checksquare(P) + N = checksquare(P) n_coords <= N || throw(error("number of requested coordinates, $n_coords, too large, only $N samples available")) Π = stationary_distribution(transpose(P)) # Compute relevant SVD info for P by computing eigendecomposition of P*P' L = L_mul_Lt(P, Π) - decomp, history = ArnoldiMethod.partialschur(L; nev=n_coords, tol=0.0) + decomp, history = partialschur(L; nev=n_coords, tol=0.0) history.converged || error("computation of stationary distribution failed") - λs, V = ArnoldiMethod.partialeigen(decomp) + λs, V = partialeigen(decomp) # λs, V = Arpack.eigs( # L; @@ -341,4 +324,4 @@ end Returns the symmetric pairwise diffusion distance matrix corresponding to points whose diffusion coordinates are given by `Ψ`. """ -diffusion_distance(Ψ) = Dists.pairwise(Dists.Euclidean(), Ψ) +diffusion_distance(Ψ) = pairwise(Euclidean(), Ψ) diff --git a/src/dynamicmetrics.jl b/src/dynamicmetrics.jl index cf3bf45c..e88b3d38 100644 --- a/src/dynamicmetrics.jl +++ b/src/dynamicmetrics.jl @@ -39,57 +39,53 @@ julia> d(x, y) ≈ sqrt(sum(abs2, Euclidean().(x, y))/10) true ``` """ -struct STmetric{M<:Dists.SemiMetric,T<:Real} <: Dists.SemiMetric +struct STmetric{M<:SemiMetric,T<:Real} <: SemiMetric metric::M p::T end # defaults: metric = Euclidean(), dim = 2, p = 1 -STmetric(d = Dists.Euclidean()) = STmetric(d, 1) +STmetric(d = Euclidean()) = STmetric(d, 1) -function (dist::STmetric)(a::AbstractArray, b::AbstractArray) - return Dists._evaluate(dist, a, b, nothing) -end - -function Dists.result_type( +function Distances.result_type( d::STmetric, a::AbstractVector{S}, b::AbstractVector{S}, ) where {S} - T = Dists.result_type(d.metric, zero(S), zero(S)) - return typeof(Dists.eval_end(d, Dists.eval_reduce(d, zero(T), zero(T)), 2)) + T = result_type(d.metric, zero(S), zero(S)) + return typeof(_end(d, _reduce(d, zero(T), zero(T)), 2)) end -Base.@propagate_inbounds function Dists._evaluate(d::STmetric, a::AbstractArray, b::AbstractArray, ::Nothing) +Base.@propagate_inbounds function (d::STmetric)(a::AbstractArray, b::AbstractArray) n = length(a) @boundscheck if n != length(b) throw(DimensionMismatch("first array has length $n which does not match the length of the second, $(length(b)).")) end if n == 0 - return zero(Dists.result_type(d, a, b)) + return zero(result_type(d, a, b)) end @inbounds begin - s = Dists.eval_start(d, a, b) + s = _start(d, a, b) if (IndexStyle(a, b) === IndexLinear() && eachindex(a) == eachindex(b)) || axes(a) == axes(b) @simd for I in eachindex(a, b) ai = a[I] bi = b[I] - s = Dists.eval_reduce(d, s, d.metric(ai, bi)) + s = _reduce(d, s, d.metric(ai, bi)) end else for (Ia, Ib) in zip(eachindex(a), eachindex(b)) ai = a[Ia] bi = b[Ib] - s = Dists.eval_reduce(d, s, d.metric(ai, bi)) + s = _reduce(d, s, d.metric(ai, bi)) end end - return Dists.eval_end(d, s, n) + return _end(d, s, n) end end -@inline Dists.eval_start(d::STmetric, a, b) = d.p == -Inf ? typemax(Dists.result_type(d, a, b)) : zero(Dists.result_type(d, a, b)) +@inline _start(d, a, b) = (T = result_type(d, a, b); d.p == -Inf ? 
typemax(T) : zero(T)) -@inline function Dists.eval_reduce(d::STmetric, s1, s2) +@inline function _reduce(d, s1, s2) p = d.p if p == 1 return s1 + s2 @@ -108,7 +104,7 @@ end end end -@inline function Dists.eval_end(d::STmetric, s, n) +@inline function _end(d, s, n) p = d.p isinf(p) && return s t = s/n @@ -128,13 +124,13 @@ end function stmetric( a::AbstractVector{T}, b::AbstractVector{T}, - d::Dists.SemiMetric = Dists.Euclidean(), + d::SemiMetric = Euclidean(), p::Real = 1, ) where {T<:SVector} return STmetric(d, p)(a, b) end -function Dists.pairwise!( +function Distances.pairwise!( r::AbstractMatrix, metric::STmetric, a::AbstractVector{<:AbstractVector{T}}, @@ -151,7 +147,7 @@ function Dists.pairwise!( end r end -function Dists.pairwise!( +function Distances.pairwise!( r::AbstractMatrix, metric::STmetric, a::AbstractVector{<:AbstractVector{<:SVector}}; @@ -172,7 +168,7 @@ function Dists.pairwise!( r end -function Dists.pairwise( +function Distances.pairwise( metric::STmetric, a::AbstractVector{<:AbstractVector{T}}, b::AbstractVector{<:AbstractVector{T}}; @@ -181,21 +177,21 @@ function Dists.pairwise( la = length(a) lb = length(b) (la == 0 || lb == 0) && return reshape(Float64[], 0, 0) - r = Matrix{Dists.result_type(metric, first(a), first(b))}(undef, la, lb) - Dists.pairwise!(r, metric, a, b) + r = Matrix{result_type(metric, first(a), first(b))}(undef, la, lb) + pairwise!(r, metric, a, b) end -function Dists.pairwise( +function Distances.pairwise( metric::STmetric, a::AbstractVector{<:AbstractVector{<:SVector}}; dims::Union{Nothing,Integer} = nothing, ) la = length(a) la == 0 && return reshape(Float64[], 0, 0) - r = Matrix{Dists.result_type(metric, first(a), first(a))}(undef, la, la) - Dists.pairwise!(r, metric, a) + r = Matrix{result_type(metric, first(a), first(a))}(undef, la, la) + pairwise!(r, metric, a) end -function Dists.colwise!( +function Distances.colwise!( r::AbstractVector, metric::STmetric, a::AbstractVector{T}, @@ -208,15 +204,15 @@ function Dists.colwise!( end r end -function Dists.colwise!( +function Distances.colwise!( r::AbstractVector, metric::STmetric, a::AbstractVector{<:AbstractVector{T}}, b::AbstractVector{T}, ) where {T<:SVector} - Dists.colwise!(r, metric, b, a) + colwise!(r, metric, b, a) end -function Dists.colwise!( +function Distances.colwise!( r::AbstractVector, metric::STmetric, a::T, @@ -232,26 +228,21 @@ function Dists.colwise!( r end -function Dists.colwise( +function Distances.colwise( metric::STmetric, a::AbstractVector{T}, b::AbstractVector{<:AbstractVector{T}}, ) where {T<:SVector} - Dists.colwise!( - Vector{Dists.result_type(metric, a, first(b))}(undef, length(b)), - metric, - a, - b, - ) + colwise!(Vector{result_type(metric, a, first(b))}(undef, length(b)), metric, a, b) end -function Dists.colwise( +function Distances.colwise( metric::STmetric, a::AbstractVector{<:AbstractVector{T}}, b::AbstractVector{T}, ) where {T<:SVector} - return Dists.colwise(metric, b, a) + return colwise(metric, b, a) end -function Dists.colwise( +function Distances.colwise( metric::STmetric, a::T, b::T, @@ -260,8 +251,8 @@ function Dists.colwise( lb = length(b) la == lb || throw(DimensionMismatch("lengths of a, $la, and b, $lb, do not match")) la == 0 && return reshape(Float64[], 0, 0) - Dists.colwise!( - Vector{Dists.result_type(metric, first(a), first(b))}(undef, la), + colwise!( + Vector{result_type(metric, first(a), first(b))}(undef, la), metric, a, b, @@ -275,32 +266,32 @@ end Return a sparse distance matrix as determined by the sparsification method `sp_method` and 
`metric`. """ -function spdist(data, sp_method::Neighborhood, metric::Dists.PreMetric = Distances.Euclidean()) +function spdist(data, sp_method::Neighborhood, metric::PreMetric = Euclidean()) N = length(data) # number of states data = vec(data) # TODO: check for better leafsize values tree = - metric isa NN.MinkowskiMetric ? NN.KDTree(data, metric; leafsize = 10) : - NN.BallTree(data, metric; leafsize = 10) - idxs = NN.inrange(tree, data, sp_method.ε, false) + metric isa MinkowskiMetric ? KDTree(data, metric; leafsize = 10) : + BallTree(data, metric; leafsize = 10) + idxs = inrange(tree, data, sp_method.ε, false) Js = vcat(idxs...) Is = vcat([fill(i, length(idxs[i])) for i in eachindex(idxs)]...) Vs = fill(1.0, length(Is)) return sparse(Is, Js, Vs, N, N, *) end -function spdist(data, sp_method::Union{KNN,MutualKNN}, metric::Dists.PreMetric = Distances.Euclidean()) +function spdist(data, sp_method::Union{KNN,MutualKNN}, metric::PreMetric = Euclidean()) N = length(data) # number of states data = vec(data) # TODO: check for better leafsize values tree = - metric isa NN.MinkowskiMetric ? NN.KDTree(data, metric; leafsize = 10) : - NN.BallTree(data, metric; leafsize = 10) - idxs, dists = NN.knn(tree, data, sp_method.k, false) + metric isa MinkowskiMetric ? KDTree(data, metric; leafsize = 10) : + BallTree(data, metric; leafsize = 10) + idxs, dists = knn(tree, data, sp_method.k, false) Js = vcat(idxs...) Is = vcat([fill(i, length(idxs[i])) for i in eachindex(idxs)]...) Ds = vcat(dists...) D = sparse(Is, Js, Ds, N, N) - SparseArrays.dropzeros!(D) + dropzeros!(D) if sp_method isa KNN return max.(D, permutedims(D)) else # sp_method isa MutualKNN @@ -309,7 +300,7 @@ function spdist(data, sp_method::Union{KNN,MutualKNN}, metric::Dists.PreMetric = end function spdist(data, sp_method::Neighborhood, metric::STmetric) N = length(data) # number of trajectories - T = Dists.result_type(metric, first(data), first(data)) + T = result_type(metric, first(data), first(data)) I1 = collect(1:N) J1 = collect(1:N) V1 = zeros(T, N) @@ -326,12 +317,8 @@ function spdist(data, sp_method::Neighborhood, metric::STmetric) end return Is, Js, Vs end - if Distributed.nprocs() > 1 - IJV = Distributed.pmap( - tempfun, - 1:(N-1); - batch_size = (N ÷ Distributed.nprocs()^2), - ) + if nprocs() > 1 + IJV = pmap(tempfun, 1:(N-1); batch_size = (N ÷ nprocs()^2)) else IJV = map(tempfun, 1:(N-1)) end @@ -349,15 +336,15 @@ function spdist(data, sp_method::Union{KNN,MutualKNN}, metric::STmetric) Ds = SharedArray{T}(N * (k + 1)) perm = collect(1:N) ds = Vector{T}(undef, N) - # Distributed.@everywhere index = Vector{Int}(undef, k+1) - @inbounds @sync Distributed.@distributed for i in 1:N - Dists.colwise!(ds, metric, data[i], data) + # @everywhere index = Vector{Int}(undef, k+1) + @inbounds @sync @distributed for i in 1:N + colwise!(ds, metric, data[i], data) index = partialsortperm!(perm, ds, 1:(k+1), initialized = true) Js[(i-1)*(k+1)+1:i*(k+1)] = index Ds[(i-1)*(k+1)+1:i*(k+1)] = ds[index] end D = sparse(Is, Js, Ds, N, N) - # SparseArrays.dropzeros!(D) + # dropzeros!(D) if sp_method isa KNN return max.(D, permutedims(D)) else # sp_method isa MutualKNN @@ -367,7 +354,7 @@ end ########### parallel pairwise computation ################# -# function Dists.pairwise!(r::SharedMatrix{T}, d::STmetric, a::AbstractMatrix, b::AbstractMatrix) where T <: Real +# function Distances.pairwise!(r::SharedMatrix{T}, d::STmetric, a::AbstractMatrix, b::AbstractMatrix) where T <: Real # ma, na = size(a) # mb, nb = size(b) # size(r) == (na, nb) || 
throw(DimensionMismatch("Incorrect size of r.")) @@ -375,8 +362,8 @@ end # q, s = divrem(ma, d.dim) # s == 0 || throw(DimensionMismatch("Number of rows is not a multiple of spatial dimension $(d.dim).")) # dists = Vector{T}(q) -# Distributed.@everywhere dists = $dists -# @inbounds @sync Distributed.@distributed for j = 1:nb +# @everywhere dists = $dists +# @inbounds @sync @distributed for j = 1:nb # for i = 1:na # eval_space!(dists, d, view(a, :, i), view(b, :, j), q) # r[i, j] = reduce_time(d, dists, q) @@ -385,7 +372,7 @@ end # r # end # -# function Dists.pairwise!(r::SharedMatrix{T}, d::STmetric, a::AbstractMatrix) where T <: Real +# function Distances.pairwise!(r::SharedMatrix{T}, d::STmetric, a::AbstractMatrix) where T <: Real # m, n = size(a) # size(r) == (n, n) || throw(DimensionMismatch("Incorrect size of r.")) # q, s = divrem(m, d.dim) @@ -393,8 +380,8 @@ end # entries = div(n*(n+1),2) # dists = Vector{T}(undef, q) # i, j = 1, 1 -# Distributed.@everywhere dists, i, j = $dists, $i, $j -# @inbounds @sync Distributed.@distributed for k = 1:entries +# @everywhere dists, i, j = $dists, $i, $j +# @inbounds @sync @distributed for k = 1:entries # tri_indices!(i, j, k) # if i == j # r[i, i] = zero(T) @@ -411,17 +398,17 @@ end # r # end # -# function Dists.pairwise(metric::STmetric, a::AbstractMatrix, b::AbstractMatrix) +# function Distances.pairwise(metric::STmetric, a::AbstractMatrix, b::AbstractMatrix) # m = size(a, 2) # n = size(b, 2) -# r = SharedArray{Dists.result_type(metric, a, b)}(m, n) -# Dists.pairwise!(r, metric, a, b) +# r = SharedArray{result_type(metric, a, b)}(m, n) +# pairwise!(r, metric, a, b) # end # -# function Dists.pairwise(metric::STmetric, a::AbstractMatrix) +# function Distances.pairwise(metric::STmetric, a::AbstractMatrix) # n = size(a, 2) -# r = SharedArray{Dists.result_type(metric, a, a)}(n, n) -# Dists.pairwise!(r, metric, a) +# r = SharedArray{result_type(metric, a, a)}(n, n) +# pairwise!(r, metric, a) # end # # @inline function tri_indices!(i::Int, j::Int, n::Int) diff --git a/src/ellipticLCS.jl b/src/ellipticLCS.jl index 99dcb211..4279074b 100644 --- a/src/ellipticLCS.jl +++ b/src/ellipticLCS.jl @@ -349,7 +349,7 @@ function (c::Combine)(singularities::Vector{<:Singularity}) N = length(singularities) - sing_tree = NN.KDTree(getcoords(singularities), Dists.Euclidean()) + sing_tree = KDTree(getcoords(singularities), Euclidean()) #Which singularities we've already dealt with sing_seen = falses(N) @@ -373,7 +373,7 @@ function (c::Combine)(singularities::Vector{<:Singularity}) while !isempty(stack) current_singularity = pop!(stack) sing_seen[i] = true - closeby_sings = NN.inrange( + closeby_sings = inrange( sing_tree, singularities[current_singularity].coords, c.distance, @@ -407,7 +407,7 @@ while adding their indices. 
function (c::Combine20)(singularities::Vector{<:Singularity}) N = length(singularities) N == 1 && return singularities - sing_tree = NN.KDTree(getcoords(singularities), Dists.Euclidean()) + sing_tree = KDTree(getcoords(singularities), Euclidean()) sing_seen = falses(N) new_sings = eltype(singularities)[] # sing_out @@ -422,7 +422,7 @@ function (c::Combine20)(singularities::Vector{<:Singularity}) continue end #We have an index +1/2 singularity - idxs, _ = NN.knn(sing_tree, singularities[i].coords, 2, true) + idxs, _ = knn(sing_tree, singularities[i].coords, 2, true) nn_idx = idxs[2] #We've already dealt with the nearest neighbor (but didn't find @@ -433,7 +433,7 @@ function (c::Combine20)(singularities::Vector{<:Singularity}) end #See if the nearest neighbor of the nearest neighbor is i - idxs2, _ = NN.knn(sing_tree, singularities[nn_idx].coords, 2, true) + idxs2, _ = knn(sing_tree, singularities[nn_idx].coords, 2, true) if idxs2[2] != i push!(new_sings, singularities[i]) continue @@ -460,7 +460,7 @@ turbulent example yielded only about an additional 1% material barriers. function (c::Combine31)(singularities::Vector{<:Singularity}) N = length(singularities) N <= 2 && return singularities - sing_tree = NN.KDTree(getcoords(singularities), Dists.Euclidean()) + sing_tree = KDTree(getcoords(singularities), Euclidean()) sing_seen = falses(N) new_sings = eltype(singularities)[] # sing_out @@ -471,7 +471,7 @@ function (c::Combine31)(singularities::Vector{<:Singularity}) continue end - idxs, _ = NN.knn(sing_tree, singularities[i].coords, 4, true) + idxs, _ = knn(sing_tree, singularities[i].coords, 4, true) correct_configuration = true for j in 1:3 if singularities[idxs[j+1]].index != 1 // 2 @@ -515,7 +515,7 @@ A heuristic for combining singularities which is likely to have a lot of false p function (c::Combine20Aggressive)(singularities::Vector{<:Singularity}) N = length(singularities) N == 1 && return singularities - sing_tree = NN.KDTree(getcoords(singularities), Dists.Euclidean()) + sing_tree = KDTree(getcoords(singularities), Euclidean()) combined_with = [Int[] for _ in 1:N] sing_seen = falses(N) @@ -528,7 +528,7 @@ function (c::Combine20Aggressive)(singularities::Vector{<:Singularity}) end #We have an index +1/2 singularity - idxs, _ = NN.knn(sing_tree, cur_sing.coords, 2, true) + idxs, _ = knn(sing_tree, cur_sing.coords, 2, true) nn_idx = idxs[2] nn_sing = singularities[nn_idx] @@ -543,7 +543,7 @@ function (c::Combine20Aggressive)(singularities::Vector{<:Singularity}) midpoint = 0.5 .* (cur_sing.coords .+ nn_sing.coords) width = norm(cur_sing.coords .- midpoint) - idxs2 = NN.inrange(sing_tree, midpoint, width) + idxs2 = inrange(sing_tree, midpoint, width) function in_rect(p, p1, p2) xmax = max(p1[1], p2[1]) @@ -668,16 +668,16 @@ returning orbit should be saved. Returns a tuple of `orbit` and `statuscode` (`0` for success, `1` for `maxiters` reached, `2` for out of bounds error, 3 for other error). """ -function compute_returning_orbit(vf::ODE.ODEFunction, seed, save = false, maxiters = 2000, tolerance = 1e-8, max_orbit_length = 20.0) +function compute_returning_orbit(vf::ODEFunction, seed, save = false, maxiters = 2000, tolerance = 1e-8, max_orbit_length = 20.0) dir = vf(seed, nothing, 0.0)[2] < 0 ? -1 : 1 # Whether orbits initially go upwards condition(u, t, integrator) = dir * (seed[2] - u[2]) - affect!(integrator) = ODE.terminate!(integrator) - cb = ODE.ContinuousCallback(condition, nothing, affect!) 
- prob = ODE.ODEProblem(vf, seed, (0.0, max_orbit_length)) + affect!(integrator) = terminate!(integrator) + cb = ContinuousCallback(condition, nothing, affect!) + prob = ODEProblem(vf, seed, (0.0, max_orbit_length)) try - sol = ODE.solve( + sol = solve( prob, - ODE.Tsit5(), + Tsit5(), maxiters = maxiters, dense = false, save_everystep = save, @@ -742,10 +742,10 @@ constructor `ηfield`, and an LCScache `cache`. """ function compute_closed_orbits(ps::AbstractVector{<:SVector{2}}, ηfield, cache; rev::Bool = true, p::LCSParameters = LCSParameters()) if cache isa LCScache # tensor-based LCS computation - l1itp = ITP.LinearInterpolation(cache.λ₁) - l2itp = ITP.LinearInterpolation(cache.λ₂) + l1itp = LinearInterpolation(cache.λ₁) + l2itp = LinearInterpolation(cache.λ₂) else # vector-field-based LCS computation - nitp = ITP.LinearInterpolation(map(v -> norm(v)^2, cache)) + nitp = LinearInterpolation(map(v -> norm(v)^2, cache)) end # define local helper functions for the η⁺/η⁻ closed orbit detection prd(λ::Float64, σ::Bool, seed::SVector{2}, cache) = @@ -767,12 +767,12 @@ function compute_closed_orbits(ps::AbstractVector{<:SVector{2}}, ηfield, cache; # end # λrange = range(pmin, stop=pmax, length=20) # ηdata = cat([η(λ, false) for λ in λrange]..., dims=3) - # ηitp = ITP.scale(ITP.interpolate(ηdata, - # (ITP.BSpline(ITP.Cubic(ITP.Natural(ITP.OnGrid()))), - # ITP.BSpline(ITP.Cubic(ITP.Natural(ITP.OnGrid()))), - # ITP.BSpline(ITP.Linear()))), + # ηitp = scale(interpolate(ηdata, + # (BSpline(Cubic(Natural(OnGrid()))), + # BSpline(Cubic(Natural(OnGrid()))), + # BSpline(Linear()))), # xspan, yspan, λrange) - # ηfield(λ::Float64) = ODE.ODEFunction{false}((u, p, t) -> ηitp(u[1], u[2], λ)) + # ηfield(λ::Float64) = ODEFunction{false}((u, p, t) -> ηitp(u[1], u[2], λ)) # prd(λ::Float64, seed::SVector{2,S}) = Poincaré_return_distance(ηfield(λ), seed) # END OF VERSION 2 @@ -913,8 +913,8 @@ function debugAt( p::LCSParameters = LCSParameters(), ) where {S} cache = orient(T[:, :], SVector{2}(orientaround[1], orientaround[2])) - l1itp = ITP.LinearInterpolation(cache.λ₁) - l2itp = ITP.LinearInterpolation(cache.λ₂) + l1itp = LinearInterpolation(cache.λ₁) + l2itp = LinearInterpolation(cache.λ₂) result = [] for σ ∈ [true, false] for λ ∈ range(p.pmin, stop = p.pmax, length = 50) @@ -1003,13 +1003,13 @@ end function ηfield(λ::Float64, σ::Bool, c::LCScache) op = σ ? (-) : (+) @. c.η = op(min(sqrt(max(c.λ₂ - λ, eps()) / c.Δ), 1.0) * c.ξ₁, min(sqrt(max(λ - c.λ₁, eps()) / c.Δ), 1.0) * c.ξ₂) - itp = ITP.LinearInterpolation(c.η) + itp = LinearInterpolation(c.η) function unit_length_itp(u, p, t) result = itp(u[1], u[2]) normresult = sqrt(result[1]^2 + result[2]^2) return normresult == 0 ? result : result / normresult end - return ODE.ODEFunction{false}(unit_length_itp) + return ODEFunction{false}(unit_length_itp) end """ @@ -1067,16 +1067,14 @@ function getvortices( #How many vortex centers we have num_jobs = length(vortexcenters) - jobs_queue_length = debug ? num_jobs : Distributed.nprocs() - results_queue_length = debug ? num_jobs : 2 * Distributed.nprocs() + jobs_queue_length = debug ? num_jobs : nprocs() + results_queue_length = debug ? 
num_jobs : 2 * nprocs() jobs_rc = RemoteChannel( () -> Channel{Tuple{S,S,S,LCSParameters,Bool,Ttype}}(jobs_queue_length), ) results_rc = RemoteChannel( - () -> Channel{Tuple{S,S,Vector{EllipticBarrier}}}( - results_queue_length, - ), + () -> Channel{Tuple{S,S,Vector{EllipticBarrier}}}(results_queue_length), ) #Producer job @@ -1166,7 +1164,7 @@ function getvortices( num_barriers = 0 if verbose - pm = Progress(num_jobs, desc = "Detecting vortices") + pm = ProgressMeter.Progress(num_jobs, desc = "Detecting vortices") end foreach(1:num_jobs) do i vx, vy, barriers = take!(results_rc) @@ -1205,11 +1203,11 @@ function makeVortexListUnique(vortices::Vector{EllipticVortex}, indexradius) end which_not_to_add = falses(N) vortexcenters = [v.center for v in vortices] - vortexcenters_tree = NN.KDTree(vortexcenters, Dists.Euclidean()) + vortexcenters_tree = KDTree(vortexcenters, Euclidean()) result = typeof(vortices[1])[] for i in 1:N which_not_to_add[i] && continue - idxs2 = NN.inrange(vortexcenters_tree, vortexcenters[i], 2 * indexradius) + idxs2 = inrange(vortexcenters_tree, vortexcenters[i], 2 * indexradius) for j in idxs2 j == i && continue c1 = [SVector{2}(p[1], p[2]) for p in vortices[j].barriers[1].curve] @@ -1282,8 +1280,8 @@ function constrainedLCS( # the results in results_rc num_jobs = length(vortexcenters) - jobs_queue_length = debug ? num_jobs : Distributed.nprocs() - results_queue_length = debug ? num_jobs : 2 * Distributed.nprocs() + jobs_queue_length = debug ? num_jobs : nprocs() + results_queue_length = debug ? num_jobs : 2 * nprocs() jobs_rc = RemoteChannel( @@ -1349,14 +1347,14 @@ function constrainedLCS( cache = deepcopy(q_local) normsqq = map(v -> norm(v)^2, q_local) - # nitp = ITP.LinearInterpolation(normsqq) + # nitp = LinearInterpolation(normsqq) invnormsqq = map(x -> iszero(x) ? one(x) : inv(x), normsqq) q1 = invnormsqq .* q_local function constrainedLCSηfield(λ, s, cache) op = s ? (-) : (+) cache .= op.(sqrt.(max.(normsqq .- (λ^2), 0)) .* q1, λ .* Ref(Ω) .* q1) - itp = ITP.LinearInterpolation(cache) - return ODE.ODEFunction{false}( + itp = LinearInterpolation(cache) + return ODEFunction{false}( (u, p, t) -> itp(u[1], u[2]), ) end @@ -1391,7 +1389,7 @@ function constrainedLCS( num_barriers = 0 if verbose - pm = Progress(num_jobs, desc = "Detecting vortices") + pm = ProgressMeter.Progress(num_jobs, desc = "Detecting vortices") end map(1:num_jobs) do i vx, vy, barriers = take!(results_rc) @@ -1517,7 +1515,7 @@ function materialbarriersTensors( kwargs..., ) end - return pmap(Tfun, P0; batch_size = div(length(P0), Distributed.nprocs()^2)) + return pmap(Tfun, P0; batch_size = div(length(P0), nprocs()^2)) end """ @@ -1575,22 +1573,22 @@ function materialbarriers(odefun, xspan, yspan, tspan, lcsp; end ### Some convenience functions -function flow(odefun::ODE.ODEFunction, u::Singularity, tspan; kwargs...) +function flow(odefun::ODEFunction, u::Singularity, tspan; kwargs...) newcoords = flow(odefun, u.coords, tspan; kwargs...) return Singularity.(newcoords, u.index) end -function flow(odefun::ODE.ODEFunction, curve::Vector{<:SVector}, tspan; kwargs...) +function flow(odefun::ODEFunction, curve::Vector{<:SVector}, tspan; kwargs...) newcurves = [flow(odefun, x, tspan; kwargs...) for x in curve] return map(eachindex(tspan)) do t [nc[t] for nc in newcurves] end end -function flow(odefun::ODE.ODEFunction, barrier::EllipticBarrier, tspan; kwargs...) +function flow(odefun::ODEFunction, barrier::EllipticBarrier, tspan; kwargs...) newcurves = flow(odefun, barrier.curve, tspan; kwargs...) 
newcores = flow(odefun, barrier.core, tspan; kwargs...) return EllipticBarrier.(newcurves, newcores, barrier.p, barrier.s) end -function flow(odefun::ODE.ODEFunction, vortex::EllipticVortex, tspan; kwargs...) +function flow(odefun::ODEFunction, vortex::EllipticVortex, tspan; kwargs...) newcenters = flow(odefun, vortex.center, tspan; kwargs...) newbarriers = [flow(odefun, barrier, tspan; kwargs...) for barrier in vortex.barriers] newbarriers2 = map(eachindex(tspan)) do t @@ -1740,7 +1738,7 @@ passed to `velocity`, for instance the interpolant of the interpolating velocity Clockwise-rotating vortices correspond to anticyclones in the Northern hemisphere and to cyclones in the Southern hemisphere. """ -function clockwise(barrier::EllipticBarrier, velo::ODE.ODEFunction{false}, t0; p=nothing) +function clockwise(barrier::EllipticBarrier, velo::ODEFunction{false}, t0; p=nothing) curve = barrier.curve result = 0.0 pprev = curve[end-1] diff --git a/src/exports.jl b/src/exports.jl index fe11060f..f850e766 100644 --- a/src/exports.jl +++ b/src/exports.jl @@ -7,18 +7,18 @@ export #ellipticLCS.jl Singularity, - getcoords, - getindices, - EllipticBarrier, - EllipticVortex, + # getcoords, + # getindices, + # EllipticBarrier, + # EllipticVortex, LCSParameters, S1Dist, P1Dist, - compute_singularities, - singularity_detection, - critical_point_detection, - compute_returning_orbit, - compute_closed_orbits, + # compute_singularities, + # singularity_detection, + # critical_point_detection, + # compute_returning_orbit, + # compute_closed_orbits, getvortices, ellipticLCS, constrainedLCS, @@ -41,21 +41,22 @@ export DM_heatflow, sparse_diff_op_family, sparse_diff_op, - kde_normalize!, + # kde_normalize!, row_normalize!, unionadjacency, - stationary_distribution, + # stationary_distribution, diffusion_coordinates, diffusion_distance, #dynamicmetrics STmetric, stmetric, - spdist, + # spdist, #FEMassembly.jl - assembleStiffnessMatrix, - assembleMassMatrix, + assemble, + Stiffness, + Mass, #gridfunctions.jl regular1dGridTypes, @@ -128,12 +129,10 @@ export adaptiveTOCollocation, #util.jl - PEuclidean, tensor_invariants, dof2node, kmeansresult2LCS, getH, - unzip, #velocityfields.jl interpolateVF, diff --git a/src/gridfunctions.jl b/src/gridfunctions.jl index e8c67b6e..6a41db3d 100644 --- a/src/gridfunctions.jl +++ b/src/gridfunctions.jl @@ -370,9 +370,9 @@ function irregularDelaunayGrid(nodes_in::AbstractVector{Vec{2,Float64}}; ctx = GridContext{2}(FEM.Triangle, nodes_in; on_torus=on_torus, on_cylinder=on_cylinder, LL=LL, UR=UR, ip=ip, kwargs...) 
if on_torus - bdata = BoundaryData(ctx, Dists.PeriodicEuclidean([UR .- LL...;])) + bdata = BoundaryData(ctx, PeriodicEuclidean([UR .- LL...;])) elseif on_cylinder - bdata = BoundaryData(ctx, Dists.PeriodicEuclidean([UR[1] - LL[1], Inf])) + bdata = BoundaryData(ctx, PeriodicEuclidean([UR[1] - LL[1], Inf])) else bdata = BoundaryData() end @@ -449,7 +449,7 @@ function regularDelaunayGrid( result.gridType = "regular PC Delaunay grid" end if !PC - bdata = BoundaryData(result, Dists.PeriodicEuclidean([(UR .- LL)...])) + bdata = BoundaryData(result, PeriodicEuclidean([UR[1] - LL[1], UR[2] - LL[2]])) else bdata = BoundaryData() end @@ -1126,24 +1126,24 @@ end #Helper type for keeping track of point numbers -struct NumberedPoint2D <: VD.AbstractPoint2D +struct NumberedPoint2D <: AbstractPoint2D x::Float64 y::Float64 id::Int64 NumberedPoint2D(x::Float64,y::Float64,k::Int64) = new(x,y,k) NumberedPoint2D(x::Float64,y::Float64) = new(x, y, 0) - NumberedPoint2D(p::VD.Point2D) = new(p.x, p.y, 0) + NumberedPoint2D(p::Point2D) = new(p.x, p.y, 0) NumberedPoint2D(p::Vec{2,Float64}) = new(p[1], p[2], 0) end -GP.Point(x::Real, y::Real, k::Int64) = NumberedPoint2D(x, y, k) -GP.Point2D(p::NumberedPoint2D) = Point2D(p.x, p.y) -GP.gety(p::NumberedPoint2D) = p.y -GP.getx(p::NumberedPoint2D) = p.x +GeometricalPredicates.Point(x::Real, y::Real, k::Int64) = NumberedPoint2D(x, y, k) +GeometricalPredicates.Point2D(p::NumberedPoint2D) = Point2D(p.x, p.y) +GeometricalPredicates.getx(p::NumberedPoint2D) = p.x +GeometricalPredicates.gety(p::NumberedPoint2D) = p.y #More or less equivalent to matlab's delaunay function, based on code from FEMDL.jl function delaunay2(x::Vector{<:Union{Vec{2,Float64},NTuple{2,Float64}}}) - width = VD.max_coord - VD.min_coord + width = max_coord - min_coord @inbounds begin max_x = maximum(map(v->v[1], x)) min_x = minimum(map(v->v[1], x)) @@ -1153,11 +1153,11 @@ function delaunay2(x::Vector{<:Union{Vec{2,Float64},NTuple{2,Float64}}}) scale_x = 0.9*width/(max_x - min_x) scale_y = 0.9*width/(max_y - min_y) n = length(x) - a = [NumberedPoint2D(VD.min_coord+(x[i][1] - min_x)*scale_x, VD.min_coord+(x[i][2]-min_y)*scale_y, i) for i in 1:n] + a = [NumberedPoint2D(min_coord+(x[i][1] - min_x)*scale_x, min_coord+(x[i][2]-min_y)*scale_y, i) for i in 1:n] for i in 1:n - @assert !(GP.getx(a[i]) < VD.min_coord || GP.gety(a[i]) > VD.max_coord) + @assert !(getx(a[i]) < min_coord || gety(a[i]) > max_coord) end - tess = VD.DelaunayTessellation2D{NumberedPoint2D}(n) + tess = DelaunayTessellation2D{NumberedPoint2D}(n) push!(tess, a) m = 0 for i in tess @@ -1317,8 +1317,8 @@ function FEM.generate_grid(::Type{FEM.Triangle}, (tri, triindex) = tri_iterator #It could be the the triangle in question is oriented the wrong way #We test this, and flip it if neccessary - J = Tensors.otimes((nodes_to_triangulate[switched_nodes_table[tri._b.id]] - nodes_to_triangulate[switched_nodes_table[tri._a.id]]), e1) - J += Tensors.otimes((nodes_to_triangulate[switched_nodes_table[tri._c.id]] - nodes_to_triangulate[switched_nodes_table[tri._a.id]]), e2) + J = otimes((nodes_to_triangulate[switched_nodes_table[tri._b.id]] - nodes_to_triangulate[switched_nodes_table[tri._a.id]]), e1) + J += otimes((nodes_to_triangulate[switched_nodes_table[tri._c.id]] - nodes_to_triangulate[switched_nodes_table[tri._a.id]]), e2) detJ = det(J) @assert detJ != 0 if detJ > 0 @@ -1461,8 +1461,8 @@ function FEM.generate_grid(::Type{FEM.QuadraticTriangle}, nodes_in::Vector{Vec{2 push!(nodes,center) end - J = Tensors.otimes((nodes_in[tri._b.id] - 
nodes_in[tri._a.id]) , e1) - J += Tensors.otimes((nodes_in[tri._c.id] - nodes_in[tri._a.id]) , e2) + J = otimes((nodes_in[tri._b.id] - nodes_in[tri._a.id]) , e1) + J += otimes((nodes_in[tri._c.id] - nodes_in[tri._a.id]) , e2) detJ = det(J) @assert det(J) != 0 diff --git a/src/odesolvers.jl b/src/odesolvers.jl index 87e547ab..02f0d120 100644 --- a/src/odesolvers.jl +++ b/src/odesolvers.jl @@ -4,19 +4,19 @@ ### LinearImplicitEuler: ################################################################################ -struct LinearImplicitEuler{CS,AD,F} <: ODE.OrdinaryDiffEqNewtonAlgorithm{CS,AD,Nothing} +struct LinearImplicitEuler{CS,AD,F} <: OrdinaryDiffEqNewtonAlgorithm{CS,AD,Nothing} linsolve::F end LinearImplicitEuler(; chunk_size = 0, autodiff = false, - linsolve = ODE.DEFAULT_LINSOLVE, + linsolve = DEFAULT_LINSOLVE, ) = LinearImplicitEuler{chunk_size,autodiff,typeof(linsolve)}(linsolve) -ODE.alg_order(::LinearImplicitEuler) = 1 -ODE.is_mass_matrix_alg(::LinearImplicitEuler) = true +OrdinaryDiffEq.alg_order(::LinearImplicitEuler) = 1 +OrdinaryDiffEq.is_mass_matrix_alg(::LinearImplicitEuler) = true mutable struct LinearImplicitEulerCache{uType,rateType,J,F} <: - ODE.OrdinaryDiffEqMutableCache # removed: @cache + OrdinaryDiffEqMutableCache # removed: @cache u::uType uprev::uType k::rateType @@ -25,7 +25,7 @@ mutable struct LinearImplicitEulerCache{uType,rateType,J,F} <: linsolve::F end -function ODE.alg_cache( +function OrdinaryDiffEq.alg_cache( alg::LinearImplicitEuler, u, rate_prototype, @@ -63,16 +63,16 @@ function DiffEqBase.initialize!(integrator, cache::LinearImplicitEulerCache) integrator.k[2] = integrator.fsallast end -ODE.@muladd function ODE.perform_step!( +@muladd function OrdinaryDiffEq.perform_step!( integrator, cache::LinearImplicitEulerCache, repeat_step = false, ) - ODE.@unpack t, dt, uprev, u, f, p = integrator - ODE.@unpack k = cache - alg = ODE.unwrap_alg(integrator, true) + @unpack t, dt, uprev, u, f, p = integrator + @unpack k = cache + alg = unwrap_alg(integrator, true) - if DiffEqBase.isconstant(f.f) + if isconstant(f.f) if cache.step cache.W = f.mass_matrix - dt*f.f cache.linsolve(vec(u), cache.W, vec(f.mass_matrix * k), true) @@ -81,7 +81,7 @@ ODE.@muladd function ODE.perform_step!( cache.linsolve(vec(u), cache.W, vec(f.mass_matrix * uprev), false) else L = f.f - DiffEqBase.update_coefficients!(L, u, p, t + dt) + update_coefficients!(L, u, p, t + dt) cache.W = f.mass_matrix - dt*L cache.linsolve(vec(u), cache.W, vec(f.mass_matrix * uprev), true) end @@ -92,15 +92,15 @@ end ### LinearMEBDF2: ################################################################################ -struct LinearMEBDF2{CS,AD,F} <: ODE.OrdinaryDiffEqNewtonAlgorithm{CS,AD,Nothing} +struct LinearMEBDF2{CS,AD,F} <: OrdinaryDiffEqNewtonAlgorithm{CS,AD,Nothing} linsolve::F end -LinearMEBDF2(; chunk_size=0, autodiff=false, linsolve=ODE.DEFAULT_LINSOLVE) = +LinearMEBDF2(; chunk_size=0, autodiff=false, linsolve=DEFAULT_LINSOLVE) = LinearMEBDF2{chunk_size,autodiff,typeof(linsolve)}(linsolve) -ODE.alg_order(::LinearMEBDF2) = 2 -ODE.is_mass_matrix_alg(::LinearMEBDF2) = true +OrdinaryDiffEq.alg_order(::LinearMEBDF2) = 2 +OrdinaryDiffEq.is_mass_matrix_alg(::LinearMEBDF2) = true -mutable struct LinearMEBDF2Cache{uType,rateType,J,F} <: ODE.OrdinaryDiffEqMutableCache +mutable struct LinearMEBDF2Cache{uType,rateType,J,F} <: OrdinaryDiffEqMutableCache u::uType uprev::uType z₁::rateType @@ -116,7 +116,7 @@ mutable struct LinearMEBDF2Cache{uType,rateType,J,F} <: ODE.OrdinaryDiffEqMutabl linsolve2::F end -function 
ODE.alg_cache( +function OrdinaryDiffEq.alg_cache( alg::LinearMEBDF2, u, rate_prototype, @@ -160,7 +160,7 @@ function ODE.alg_cache( ) end -function ODE.initialize!(integrator, cache::LinearMEBDF2Cache) +function DiffEqBase.initialize!(integrator, cache::LinearMEBDF2Cache) integrator.kshortsize = 2 integrator.fsalfirst = cache.fsalfirst integrator.fsallast = cache.k @@ -175,16 +175,16 @@ function ODE.initialize!(integrator, cache::LinearMEBDF2Cache) ) end -ODE.@muladd function ODE.perform_step!( +@muladd function OrdinaryDiffEq.perform_step!( integrator, cache::LinearMEBDF2Cache, repeat_step = false, ) - ODE.@unpack t, dt, uprev, u, f, p = integrator - ODE.@unpack k, z₁, z₂, tmp = cache - alg = ODE.unwrap_alg(integrator, true) + @unpack t, dt, uprev, u, f, p = integrator + @unpack k, z₁, z₂, tmp = cache + alg = unwrap_alg(integrator, true) - if DiffEqBase.isconstant(f.f) + if isconstant(f.f) if cache.step cache.W = f.mass_matrix - dt*f.f z₁ = f.mass_matrix * uprev @@ -192,13 +192,13 @@ ODE.@muladd function ODE.perform_step!( cache.step = false end - if f.mass_matrix == LinearAlgebra.I + if f.mass_matrix == I ##### STEP 1: cache.linsolve(vec(z₁), cache.W, vec(uprev), false) ##### STEP 2: cache.linsolve(vec(z₂), cache.W, vec(z₁), false) ### precalculation for STEP 3: - DiffEqBase.@.. tmp = -0.5z₂ + z₁ + 0.5uprev + @.. tmp = -0.5z₂ + z₁ + 0.5uprev z₁ .= tmp ### M ≠ I: else @@ -209,7 +209,7 @@ ODE.@muladd function ODE.perform_step!( mul!(tmp, f.mass_matrix, z₁) cache.linsolve(vec(z₂), cache.W, vec(tmp), false) # precalculation for STEP 3: - DiffEqBase.@.. tmp = -0.5z₂ + z₁ + 0.5uprev + @.. tmp = -0.5z₂ + z₁ + 0.5uprev mul!(z₁, f.mass_matrix, tmp) end ##### STEP 3: @@ -219,7 +219,7 @@ ODE.@muladd function ODE.perform_step!( else # time-dependent case L = f.f if cache.step - DiffEqBase.update_coefficients!(L, u, p, t + dt) + update_coefficients!(L, u, p, t + dt) cache.W = f.mass_matrix - dt*L cache.step = false end @@ -228,23 +228,23 @@ ODE.@muladd function ODE.perform_step!( ##### STEP 1: cache.linsolve(vec(z₁), cache.W, vec(uprev), true) ##### STEP 2: - DiffEqBase.update_coefficients!(L, u, p, t + 2dt) + update_coefficients!(L, u, p, t + 2dt) cache.W₂ = f.mass_matrix - dt*L cache.linsolve2(vec(z₂), cache.W₂, vec(z₁), true) # precalculation for STEP 3: - DiffEqBase.@.. tmp = -0.5z₂ + z₁ + 0.5uprev + @.. tmp = -0.5z₂ + z₁ + 0.5uprev z₁ .= tmp else ### M ≠ I ##### STEP 1: mul!(tmp, f.mass_matrix, uprev) cache.linsolve(vec(z₁), cache.W, vec(tmp), true) ##### STEP 2: - DiffEqBase.update_coefficients!(L, u, p, t + 2dt) + update_coefficients!(L, u, p, t + 2dt) cache.W₂ = f.mass_matrix - dt * L mul!(tmp, f.mass_matrix, z₁) cache.linsolve2(vec(z₂), cache.W₂, vec(tmp), true) # precalculation for STEP 3: - DiffEqBase.@.. tmp = -0.5z₂ + z₁ + 0.5uprev + @.. 
tmp = -0.5z₂ + z₁ + 0.5uprev mul!(z₁, f.mass_matrix, tmp) end ##### STEP 3: diff --git a/src/plotting.jl b/src/plotting.jl index 8d19c640..28f3227e 100644 --- a/src/plotting.jl +++ b/src/plotting.jl @@ -195,7 +195,7 @@ function compute_euler_to_lagrange_points_raw( x1, x2 = xi euler_to_lagrange_points_raw = SharedArray{Float64}(length(x2), length(x1), 2) - @sync @distributed for i in eachindex(x1) + @sync @distributed for i in eachindex(x1) for j in eachindex(x2) point = SVector{2}(x1[i], x2[j]) try @@ -494,7 +494,7 @@ RecipesBase.@userplot Plot_FTLE RecipesBase.@recipe function f( as::Plot_FTLE; tolerance=1e-4, - solver=ODE.BS5(), + solver=BS5(), # existing_plot=nothing, TODO 1.0 flip_y=false, check_inbounds=always_true, diff --git a/src/pointlocation.jl b/src/pointlocation.jl index 9c8862a8..db8adcd8 100644 --- a/src/pointlocation.jl +++ b/src/pointlocation.jl @@ -68,7 +68,7 @@ struct DelaunayCellLocator <: PointLocator scale_y::Float64 minx::Float64 miny::Float64 - tess::VD.DelaunayTessellation2D{NumberedPoint2D} + tess::DelaunayTessellation2D{NumberedPoint2D} extended_points::Vector{Vec{2,Float64}} point_number_table::Vector{Int} cell_number_table::Vector{Int} @@ -76,18 +76,18 @@ end function locatePoint(loc::DelaunayCellLocator, grid::FEM.Grid, x::Vec{2,Float64}) #TODO: can this work for x that are not Float64? - point_inbounds = NumberedPoint2D(VD.min_coord+(x[1]-loc.minx)*loc.scale_x, VD.min_coord+(x[2]-loc.miny)*loc.scale_y) - if min(point_inbounds.x, point_inbounds.y) < VD.min_coord || max(point_inbounds.x,point_inbounds.y) > VD.max_coord + point_inbounds = NumberedPoint2D(min_coord+(x[1]-loc.minx)*loc.scale_x, min_coord+(x[2]-loc.miny)*loc.scale_y) + if min(point_inbounds.x, point_inbounds.y) < min_coord || max(point_inbounds.x,point_inbounds.y) > max_coord throw(DomainError("point(s) outside of domain")) end - t_index = VD.findindex(loc.tess, point_inbounds) + t_index = findindex(loc.tess, point_inbounds) t = loc.tess._trigs[t_index] - if VD.isexternal(t) + if isexternal(t) throw(DomainError("triangle outside of domain")) end v1::Vec{2} = loc.extended_points[t._b.id] - loc.extended_points[t._a.id] v2::Vec{2} = loc.extended_points[t._c.id] - loc.extended_points[t._a.id] - J::Tensor{2,2,Float64,4} = Tensors.otimes(v1 , e1) + Tensors.otimes(v2 , e2) + J::Tensor{2,2,Float64,4} = otimes(v1 , e1) + otimes(v2 , e2) #J = [v1[1] v2[1]; v1[2] v2[2]] #TODO: rewrite this so that we actually find the cell in question and get the ids #From there (instead of from the tesselation). Then get rid of the permutation that @@ -102,7 +102,7 @@ struct P2DelaunayCellLocator <: PointLocator scale_y::Float64 minx::Float64 miny::Float64 - tess::VD.DelaunayTessellation2D{NumberedPoint2D} + tess::DelaunayTessellation2D{NumberedPoint2D} internal_triangles::Vector{Int} inv_internal_triangles::Vector{Int} point_number_table::Vector{Int} @@ -124,18 +124,18 @@ end function locatePoint(loc::P2DelaunayCellLocator, grid::FEM.Grid, x::Vec{2,Float64}) #TODO: can this work for x that are not Float64? 
- point_inbounds = NumberedPoint2D(VD.min_coord+(x[1]-loc.minx)*loc.scale_x,VD.min_coord+(x[2]-loc.miny)*loc.scale_y) - if min(point_inbounds.x, point_inbounds.y) < VD.min_coord || max(point_inbounds.x,point_inbounds.y) > VD.max_coord + point_inbounds = NumberedPoint2D(min_coord+(x[1]-loc.minx)*loc.scale_x,min_coord+(x[2]-loc.miny)*loc.scale_y) + if min(point_inbounds.x, point_inbounds.y) < min_coord || max(point_inbounds.x,point_inbounds.y) > max_coord throw(DomainError("point(s) outside of domain")) end - t = VD.findindex(loc.tess, point_inbounds) - if VD.isexternal(loc.tess._trigs[t]) + t = findindex(loc.tess, point_inbounds) + if isexternal(loc.tess._trigs[t]) throw(DomainError("triangle outside of domain")) end qTriangle = grid.cells[loc.inv_internal_triangles[t]] v1::Vec{2} = grid.nodes[qTriangle.nodes[2]].x - grid.nodes[qTriangle.nodes[1]].x v2::Vec{2} = grid.nodes[qTriangle.nodes[3]].x - grid.nodes[qTriangle.nodes[1]].x - J::Tensor{2,2,Float64,4} = Tensors.otimes(v1 , e1) + Tensors.otimes(v2 , e2) + J::Tensor{2,2,Float64,4} = otimes(v1 , e1) + otimes(v2 , e2) #TODO: Think about whether doing it like this (with the permutation) is sensible return (inv(J) ⋅ (x - grid.nodes[qTriangle.nodes[1]].x)), loc.point_number_table[[permute!(collect(qTriangle.nodes),[2,3,1,5,6,4])]],1 end diff --git a/src/pullbacktensors.jl b/src/pullbacktensors.jl index d378f7fb..7c39360e 100644 --- a/src/pullbacktensors.jl +++ b/src/pullbacktensors.jl @@ -1,7 +1,7 @@ # Functions for pulling back tensors const default_tolerance = 1e-3 -const default_solver = ODE.BS5() +const default_solver = Tsit5() # TODO: exploit throughout struct Trajectory{dim,ts,T,N} @@ -28,16 +28,16 @@ julia> f = u -> flow((u, p, t) -> vf(u, p, t), u, range(0., stop=100, length=21) ``` """ @inline function flow(odefun, u0, tspan; kwargs...) - return flow(ODE.ODEFunction(odefun), u0, tspan; kwargs...) + return flow(ODEFunction(odefun), u0, tspan; kwargs...) end -@inline function flow(odefun::ODE.ODEFunction{true}, u0::Union{Tuple{Vararg{<:Number}},AbstractVecOrMat{<:Number}}, tspan; kwargs...) +@inline function flow(odefun::ODEFunction{true}, u0::Union{Tuple{Vararg{<:Number}},AbstractVecOrMat{<:Number}}, tspan; kwargs...) return _flow(odefun, convert(Vector, u0), tspan; kwargs...) end -@inline function flow(odefun::ODE.ODEFunction{false}, u0::Union{Tuple{Vararg{<:Number}},AbstractVecOrMat{<:Number}}, tspan; kwargs...) +@inline function flow(odefun::ODEFunction{false}, u0::Union{Tuple{Vararg{<:Number}},AbstractVecOrMat{<:Number}}, tspan; kwargs...) return _flow(odefun, convert(SVector{length(u0)}, u0), tspan; kwargs...) end @inline function _flow( - odefun::ODE.ODEFunction, + odefun::ODEFunction, u0::T, tspan; p = nothing, @@ -61,12 +61,12 @@ end # function affect!(integrator) # return terminate!(integrator)# # end - # callback = ODE.CallbackSet( - # map(x-> ODE.DiscreteCallback(x,affect!), + # callback = CallbackSet( + # map(x-> DiscreteCallback(x,affect!), # [leftSide,rightSide,topSide,bottomSide])...) 
#end - prob = ODE.ODEProblem(odefun, u0, (first(tspan), last(tspan)), p) - sol = ODE.solve( + prob = ODEProblem(odefun, u0, (first(tspan), last(tspan)), p) + sol = solve( prob, solver; saveat = saveat, @@ -91,7 +91,7 @@ Return the time-resolved base trajectory and its associated linearized flow maps dim = length(x) !(dim ∈ (2, 3)) && throw(ArgumentError("length(u) ∉ [2,3]")) return linearized_flow( - ODE.ODEFunction(odefun), + ODEFunction(odefun), convert(SVector{dim}, x), tspan, δ; @@ -99,7 +99,7 @@ Return the time-resolved base trajectory and its associated linearized flow maps ) end function linearized_flow( - odefun::ODE.ODEFunction{iip}, + odefun::ODEFunction{iip}, x::T, tspan, δ; @@ -111,7 +111,7 @@ function linearized_flow( if δ != 0 # use finite differencing stencil = [x[1], x[2], x[1] + δ, x[2], x[1], x[2] + δ, x[1] - δ, x[2], x[1], x[2] - δ] - rhs = ODE.ODEFunction{iip}((du, u, p, t) -> arraymap!(du, u, p, t, odefun, 5, 2)) + rhs = ODEFunction{iip}((du, u, p, t) -> arraymap!(du, u, p, t, odefun, 5, 2)) sol = _flow(rhs, stencil, tspan; tolerance = tolerance, solver = solver, p = p) return ( map(s -> SVector{2}(s[1], s[2]), sol), @@ -142,7 +142,7 @@ function linearized_flow( x[1], x[2] - δ, ) - srhs = ODE.ODEFunction{iip}((u, p, t) -> arraymap2(u, p, t, odefun)) + srhs = ODEFunction{iip}((u, p, t) -> arraymap2(u, p, t, odefun)) ssol = _flow(srhs, sstencil, tspan; tolerance = tolerance, solver = solver, p = p) return ( @@ -160,7 +160,7 @@ function linearized_flow( end # iip end function linearized_flow( - odefun::ODE.ODEFunction{iip}, + odefun::ODEFunction{iip}, x::T, tspan, δ; @@ -193,7 +193,7 @@ function linearized_flow( x[2], x[3] - δ, ] - rhs = ODE.ODEFunction{iip}((du, u, p, t) -> arraymap!(du, u, p, t, odefun, 7, 3)) + rhs = ODEFunction{iip}((du, u, p, t) -> arraymap!(du, u, p, t, odefun, 7, 3)) sol = _flow(rhs, stencil, tspan; tolerance = tolerance, solver = solver, p = p) return ( map(s -> SVector{3}(s[1], s[2], s[3]), sol), @@ -249,7 +249,7 @@ function linearized_flow( x[2], x[3] - δ, ) - srhs = ODE.ODEFunction{iip}((u, p, t) -> arraymap3(u, p, t, odefun)) + srhs = ODEFunction{iip}((u, p, t) -> arraymap3(u, p, t, odefun)) ssol = _flow(srhs, sstencil, tspan; tolerance = tolerance, solver = solver, p = p) return ( @@ -294,7 +294,7 @@ documentation for the meaning of `δ`. * `kwargs`: are passed to `linearized_flow` """ function mean_diff_tensor(odefun, u, tspan, δ; kwargs...) - return mean(Tensors.dott.(inv.(linearized_flow(odefun, u, tspan, δ; kwargs...)[2]))) + return mean(dott.(inv.(linearized_flow(odefun, u, tspan, δ; kwargs...)[2]))) end """ @@ -311,7 +311,7 @@ documentation for the meaning of `δ`. * `kwargs`: are passed to `linearized_flow` """ function CG_tensor(odefun, u, tspan, δ; kwargs...) - Tensors.tdot(linearized_flow(odefun, u, [tspan[1], tspan[end]], δ; kwargs...)[2][end]) + tdot(linearized_flow(odefun, u, [tspan[1], tspan[end]], δ; kwargs...)[2][end]) end """ @@ -349,7 +349,7 @@ right Cauchy-Green strain tensor. Linearized flow maps are computed with """ function pullback_metric_tensor(odefun, u, tspan, δ; G::F=(_ -> one(SymmetricTensor{2,2})), kwargs...) where {F} pos, DF = linearized_flow(odefun, u, tspan, δ; kwargs...) - return [Tensors.symmetric(transpose(DF[i]) ⋅ G(pos[i]) ⋅ DF[i]) for i in eachindex(pos, DF)] + return [symmetric(transpose(DF[i]) ⋅ G(pos[i]) ⋅ DF[i]) for i in eachindex(pos, DF)] end """ @@ -369,7 +369,7 @@ documentation for the meaning of `δ`. 
function pullback_diffusion_tensor(odefun, u, tspan, δ; D::F=(_ -> one(SymmetricTensor{2,2})), kwargs...) where {F} pos, DF = linearized_flow(odefun, u, tspan, δ; kwargs...) DFinv = inv.(DF) - return [Tensors.symmetric(DFinv[i] ⋅ D(pos[i]) ⋅ transpose(DFinv[i])) for i in eachindex(pos, DF)] + return [symmetric(DFinv[i] ⋅ D(pos[i]) ⋅ transpose(DFinv[i])) for i in eachindex(pos, DF)] end """ @@ -411,7 +411,7 @@ documentation for the meaning of `δ`. function av_weighted_CG_tensor(odefun, u, tspan, δ; D::F=(_ -> one(SymmetricTensor{2,2})), kwargs...) where {F} pos, DF = linearized_flow(odefun, u, tspan, δ; kwargs...) return mean( - (det(D(pos[i]))*Tensors.symmetric(transpose(DF[i])⋅inv(D(pos[i]))⋅DF[i]) for i in eachindex(pos, DF)) + (det(D(pos[i]))*symmetric(transpose(DF[i])⋅inv(D(pos[i]))⋅DF[i]) for i in eachindex(pos, DF)) ) end @@ -425,21 +425,21 @@ function pullback_tensors_geo(odefun, u, tspan, δ; D::SymmetricTensor{2}=one(Sy DF = linearized_flow(odefun, u, tspan, δ; kwargs...) PBmet = [deg2met(xi) ⋅ DFi ⋅ met2deg_init for (xi, DFi) in DF] PBdiff = [inv(deg2met(xi) ⋅ DFi) for (xi, DFi) in DF] - return [Tensors.symmetric(transpose(pb) ⋅ G ⋅ pb) for pb in PBmet], - [Tensors.symmetric(pb ⋅ D ⋅ transpose(pb)) for pb in PBdiff] + return [symmetric(transpose(pb) ⋅ G ⋅ pb) for pb in PBmet], + [symmetric(pb ⋅ D ⋅ transpose(pb)) for pb in PBdiff] end function pullback_metric_tensor_geo(odefun, u, tspan, δ; G::SymmetricTensor{2}=one(SymmetricTensor{2,2}), kwargs...) met2deg_init = met2deg(u) DF = linearized_flow(odefun, u, tspan, δ; kwargs...) PB = [deg2met(xi) ⋅ DFi ⋅ met2deg_init for (xi, DFi) in DF] - return [Tensors.symmetric(transpose(pb) ⋅ G ⋅ pb) for pb in PB] + return [symmetric(transpose(pb) ⋅ G ⋅ pb) for pb in PB] end function pullback_diffusion_tensor_geo(odefun, u, tspan, δ; D::SymmetricTensor{2}=one(SymmetricTensor{2,2}), kwargs...) DF = linearized_flow(odefun, u, tspan, δ; kwargs...) PB = [inv(deg2met(xi) ⋅ DFi) for (xi, DFi) in DF] - return [Tensors.symmetric(pb ⋅ D ⋅ transpose(pb)) for pb in PB] + return [symmetric(pb ⋅ D ⋅ transpose(pb)) for pb in PB] end # TODO: this is probably broken, the diffusion tensor is not used! diff --git a/src/util.jl b/src/util.jl index 3c3cbdb8..f77708c5 100644 --- a/src/util.jl +++ b/src/util.jl @@ -1,8 +1,6 @@ # (c) 2017-2021 Nathanael Schilling & Daniel Karrasch # Various utility functions -const PEuclidean = Distances.PeriodicEuclidean - @enum BisectionStatus::Int zero_found=0 maxiters_exceeded=1 nans_between=2 no_real_root=3 # bisection is used in closed orbit detection in ellipticLCS.jl @@ -66,7 +64,7 @@ the result in the corresponding slice of `du`. This is so that a decoupled ODE system with several initial values can be solved without having to call the ODE solver multiple times. """ -function arraymap!(du, u, p, t, odefun::ODE.ODEFunction{true}, N::Int, dim::Int) +function arraymap!(du, u, p, t, odefun::ODEFunction{true}, N::Int, dim::Int) @boundscheck eachindex(du, u) == Base.OneTo(N*dim) || throw( DimensionMismatch("vector arguments must have equal axes") ) @@ -154,8 +152,8 @@ end Returns a linear interpolant of the `AxisArray` `A` (without extrapolation). """ -function ITP.LinearInterpolation(A::AxisArray) - ITP.scale(ITP.interpolate(A.data, ITP.BSpline(ITP.Linear())), axisvalues(A)...) +function Interpolations.LinearInterpolation(A::AxisArray) + scale(interpolate(A.data, BSpline(Linear())), axisvalues(A)...) end """ @@ -163,8 +161,8 @@ end Returns a cubic spline interpolant of the `AxisArray` `A` (without extrapolation). 
""" -function ITP.CubicSplineInterpolation(A::AxisArray) - ITP.scale(ITP.interpolate(A.data, ITP.BSpline(ITP.Cubic(ITP.Natural(ITP.OnGrid())))), +function Interpolations.CubicSplineInterpolation(A::AxisArray) + scale(interpolate(A.data, BSpline(Cubic(Natural(OnGrid())))), axisvalues(A)...) end @@ -242,8 +240,8 @@ end #Unit Vectors in R^2 -const e1 = Tensors.basevec(Vec{2}, 1) -const e2 = Tensors.basevec(Vec{2}, 2) +const e1 = basevec(Vec{2}, 1) +const e2 = basevec(Vec{2}, 2) function rawInvCGTensor(args...; kwargs...) result = invCGTensor(args...; kwargs...) @@ -315,16 +313,16 @@ goodmod(a, b) = Base.mod(a, b) # end #TODO: Document this -function unzip(A::Array{T}) where {T} - res = map(x -> x[], T.parameters) - res_len = length(res) - for t in A - for i in 1:res_len - push!(res[i], t[i]) - end - end - res -end +# function unzip(A::Array{T}) where {T} +# res = map(x -> x[], T.parameters) +# res_len = length(res) +# for t in A +# for i in 1:res_len +# push!(res[i], t[i]) +# end +# end +# res +# end _getside(a, b, c) = sign((a[1] - c[1])*(b[2] - c[2]) - (b[1] - c[1])*(a[2] - c[2])) diff --git a/src/velocityfields.jl b/src/velocityfields.jl index e37f11af..ebd2e4cd 100644 --- a/src/velocityfields.jl +++ b/src/velocityfields.jl @@ -3,7 +3,7 @@ # interpolated vector field components """ - interpolateVF(xspan, yspan, tspan, u, v, itp_type=ITP.BSpline(ITP.Cubic(ITP.Free())))) -> VI + interpolateVF(xspan, yspan, tspan, u, v, itp_type=BSpline(Cubic(Free())))) -> VI `xspan`, `yspan` and `tspan` span the space-time domain on which the velocity-components `u` and `v` are given. `u` corresponds to the ``x``- or @@ -16,7 +16,7 @@ documentation for how to declare other interpolation types. julia> uv = interpolateVF(xs, ys, ts, u, v) julia> uv(x, y, t) -2-element SArray{Tuple{2},Float64,1,2} with indices SOneTo(2): +2-element SVector{2, Float64} with indices SOneTo(2): -44.23554926984537 -4.964069022198859 ``` @@ -26,10 +26,10 @@ function interpolateVF(X::AbstractRange{S1}, T::AbstractRange{S1}, U::AbstractArray{S2,3}, V::AbstractArray{S2,3}, - itp_type=ITP.BSpline(ITP.Cubic(ITP.Free(ITP.OnGrid()))) + itp_type=BSpline(Cubic(Free(OnGrid()))) ) where {S1 <: Real, S2 <: Real} UV = map(SVector{2,S2}, U, V)::Array{SVector{2,S2},3} - return ITP.scale(ITP.interpolate(UV, itp_type), X, Y, T) + return scale(interpolate(UV, itp_type), X, Y, T) end """ @@ -48,7 +48,7 @@ julia> f = u -> flow(interp_rhs, u, tspan; p=UI) julia> mCG_tensor = u -> CG_tensor(interp_rhs, u, tspan, δ; p=UI) ``` """ -const interp_rhs = ODE.ODEFunction{false}((u, p, t) -> p(u[1], u[2], t)) +const interp_rhs = ODEFunction{false}((u, p, t) -> p(u[1], u[2], t)) """ interp_rhs!(du, u, p, t) -> Vector @@ -66,7 +66,7 @@ julia> f = u -> flow(interp_rhs!, u, tspan; p=UI) julia> mCG_tensor = u -> CG_tensor(interp_rhs!, u, tspan, δ; p=UI) ``` """ -const interp_rhs! = ODE.ODEFunction{true}((du, u, p, t) -> du .= p(u[1], u[2], t)) +const interp_rhs! 
= ODEFunction{true}((du, u, p, t) -> du .= p(u[1], u[2], t)) # standard map const standard_a = 0.971635 @@ -102,7 +102,7 @@ function ABC_flow(u, p, t) C * sin(u[2]) + B * cos(u[1]) ) end -const abcFlow = ODE.ODEFunction{false}(ABC_flow) +const abcFlow = ODEFunction{false}(ABC_flow) # cylinder flow [Froyland, Lloyd, and Santitissadeekorn, 2010] function _cylinder_flow(u, p, t) @@ -120,4 +120,4 @@ function _cylinder_flow(u, p, t) A(t) * cos(x - ν * t) * sin(y) ) end -const cylinder_flow = ODE.ODEFunction{false}(_cylinder_flow) +const cylinder_flow = ODEFunction{false}(_cylinder_flow) diff --git a/test/define_vector_fields.jl b/test/define_vector_fields.jl index 55b17adb..67c75a7f 100644 --- a/test/define_vector_fields.jl +++ b/test/define_vector_fields.jl @@ -3,7 +3,7 @@ const ODE = OrdinaryDiffEq heaviside(x) = x > 0 ? one(x) : zero(x) -rot_double_gyre = ODE.ODEFunction{false}((u, p, t) -> begin +rot_double_gyre = ODEFunction{false}((u, p, t) -> begin SVector{2}(-((2*π*sin(u[1]*π)*cos(2*u[2]*π) *(t ^ 2*(3 - 2t)*heaviside(1 - t) *heaviside(t) + heaviside(-1 + t)) @@ -19,7 +19,7 @@ rot_double_gyre = ODE.ODEFunction{false}((u, p, t) -> begin end ) -rot_double_gyre! = ODE.ODEFunction{true}((du, u, p, t) -> begin +rot_double_gyre! = ODEFunction{true}((du, u, p, t) -> begin du[1] = -((2*π*sin(u[1]*π)*cos(2*u[2]*π)*(t ^ 2*(3 - 2t)*heaviside(1 - t)*heaviside(t) + heaviside(-1 + t)) + π*cos(u[2]*π)*sin(2*u[1]*π)*(1 - (t ^ 2*(3 - 2t)*heaviside(1 - t)*heaviside(t) + heaviside(-1 + t))))) du[2] = 2*π*sin(u[2]*π)*cos(2*u[1]*π)*(1 - (t ^ 2*(3 - 2t)*heaviside(1 - t)*heaviside(t) + heaviside(-1 + t))) @@ -28,14 +28,14 @@ rot_double_gyre! = ODE.ODEFunction{true}((du, u, p, t) -> begin end ) -bickleyJet = ODE.ODEFunction{false}((u, p, t) -> begin +bickleyJet = ODEFunction{false}((u, p, t) -> begin SVector{2}( -((-0.00012532*(0.0075*cos(0.313922461152095*(u[1] - t*(2.888626e-5 - 1.604096e-5*(-1 + √5)))) + 0.15*cos(0.627844922304191*(-1.28453e-5t + u[1])) + 0.3*cos(0.941767383456286*(-2.888626e-5t + u[1])))*sech(0.564971751412429*u[2]) ^ 2*tanh(0.564971751412429*u[2]) - 6.266e-5*(1 - tanh(0.564971751412429*u[2]) ^ 2))), 0.0001109082*((-0.00235441845864072*sin(0.313922461152095*(u[1] - t*(2.888626e-5 - 1.604096e-5*(-1 + √5)))) - 0.0941767383456286*sin(0.627844922304191*(-1.28453e-5t + u[1]))) - 0.282530215036886*sin(0.941767383456286*(-2.888626e-5t + u[1])))*sech(0.564971751412429*u[2]) ^ 2) end ) -bickleyJet! = ODE.ODEFunction{true}((du, u, p, t) -> begin +bickleyJet! 
= ODEFunction{true}((du, u, p, t) -> begin du[1] = -((-0.00012532*(0.0075*cos(0.313922461152095*(u[1] - t*(2.888626e-5 - 1.604096e-5*(-1 + √5)))) + 0.15*cos(0.627844922304191*(-1.28453e-5t + u[1])) + 0.3*cos(0.941767383456286*(-2.888626e-5t + u[1])))*sech(0.564971751412429*u[2]) ^ 2*tanh(0.564971751412429*u[2]) - 6.266e-5*(1 - tanh(0.564971751412429*u[2]) ^ 2))) du[2] = 0.0001109082*((-0.00235441845864072*sin(0.313922461152095*(u[1] - t*(2.888626e-5 - 1.604096e-5*(-1 + √5)))) - 0.0941767383456286*sin(0.627844922304191*(-1.28453e-5t + u[1]))) - 0.282530215036886*sin(0.941767383456286*(-2.888626e-5t + u[1])))*sech(0.564971751412429*u[2]) ^ 2 return du diff --git a/test/test_advectiondiffusion.jl b/test/test_advectiondiffusion.jl index b8230af8..5f1211ee 100644 --- a/test/test_advectiondiffusion.jl +++ b/test/test_advectiondiffusion.jl @@ -4,7 +4,7 @@ include("define_vector_fields.jl") @testset "FEMheatflow" begin ctx, _ = regularTriangularGrid((100, 100)) - M = assembleMassMatrix(ctx) + M = assemble(Mass(), ctx) U = FEM_heatflow(rot_double_gyre!, ctx, range(0., stop=1., length=11), 1e-3; factor=true) λ, V = diffusion_coordinates(U, 6) @test first(λ) ≈ 1 rtol=1e-6 @@ -18,4 +18,4 @@ end U = DM_heatflow(u -> flow(rot_double_gyre, u, tspan), particles, Neighborhood(0.1), gaussian(0.1)) λ, V = diffusion_coordinates(U, 6) @test first(λ) ≈ 1 -end \ No newline at end of file +end diff --git a/test/test_elliptic.jl b/test/test_elliptic.jl index d1fb2135..834e7a67 100644 --- a/test/test_elliptic.jl +++ b/test/test_elliptic.jl @@ -8,8 +8,8 @@ const CS = CoherentStructures @test sing.coords.data == coords @test sing.index == 1 @test sing == Singularity(SVector{2}(coords), 1//1) - @test getcoords([sing])[1].data == coords - @test getindices([sing])[1] == 1 + @test CS.getcoords([sing])[1].data == coords + @test CS.getindices([sing])[1] == 1 end @testset "circle distances" begin @@ -26,24 +26,24 @@ end for v in (AxisArray(SVector{2}.(x, y'), x, y), AxisArray(SVector{2}.(-x, -y'), x, y), AxisArray(SVector{2}.(-y', x), x, y)) - S = @inferred compute_singularities(v) + S = @inferred CS.compute_singularities(v) @test length(S) == 1 @test iszero(S[1].coords) @test S[1].index == 1 for mh in ((), (Combine20(),), (Combine20Aggressive(),), (Combine31(),)) - S = @inferred critical_point_detection(v, 0.1; merge_heuristics=mh) + S = @inferred CS.critical_point_detection(v, 0.1; merge_heuristics=mh) @test length(S) == 1 @test iszero(S[1].coords) @test S[1].index == 1 end end v = AxisArray(SVector{2}.(x, -y'), x, y) - S = @inferred compute_singularities(v) + S = @inferred CS.compute_singularities(v) @test length(S) == 1 @test iszero(S[1].coords) @test S[1].index == -1 for mh in ((), (Combine20(),), (Combine20Aggressive(),), (Combine31(),)) - S = @inferred critical_point_detection(v, 0.1; merge_heuristics=mh) + S = @inferred CS.critical_point_detection(v, 0.1; merge_heuristics=mh) @test length(S) == 1 @test iszero(S[1].coords) @test S[1].index == -1 @@ -70,24 +70,24 @@ T = @inferred map(mCG_tensor, P) Taxis = AxisArray(T, xspan, yspan) @testset "combine singularities" begin - @inferred singularity_detection(T, xspan, yspan, 0.1; merge_heuristics=()) + @inferred CS.singularity_detection(T, xspan, yspan, 0.1; merge_heuristics=()) ξ = map(t -> convert(SVector{2}, eigvecs(t)[:,1]), T) - singularities = @inferred compute_singularities(ξ, xspan, yspan, P1Dist()) + singularities = @inferred CS.compute_singularities(ξ, xspan, yspan, P1Dist()) new_singularities = @inferred CS.Combine(3*step(xspan))(singularities) for mh in 
(Combine20(), Combine20Aggressive(), Combine31()) @inferred mh(new_singularities) end r₁ , r₂ = 2*rand(2) - @test sum(getindices(CS.Combine(r₁)(singularities))) == - sum(getindices(CS.Combine(r₂)(singularities))) == - sum(getindices(CS.Combine(2)(singularities))) + @test sum(CS.getindices(CS.Combine(r₁)(singularities))) == + sum(CS.getindices(CS.Combine(r₂)(singularities))) == + sum(CS.getindices(CS.Combine(2)(singularities))) end @testset "closed orbit detection" begin Ω = SMatrix{2,2}(0, 1, -1, 0) vf(λ) = OrdinaryDiffEq.ODEFunction{false}((u, p, t) -> (Ω - (1.0 - λ) * I) * u) seed = SVector{2}(rand(), 0) - ret, info = @inferred compute_returning_orbit(vf(1.0), seed) + ret, info = @inferred CS.compute_returning_orbit(vf(1.0), seed) @test info === :Terminated @test ret[1] ≈ seed d = @inferred CS.Poincaré_return_distance(vf(1.0), seed) @@ -110,16 +110,16 @@ end vortices, singularities = @inferred ellipticLCS(Taxis, p; outermost=true, verbose=false) vortex = vortices[1] flowvortex = flow(rot_double_gyre, vortex, tspan) - @test flowvortex isa Vector{EllipticVortex} + @test flowvortex isa Vector{CS.EllipticVortex} @test length(flowvortex) == length(tspan) flowbarrier = flow(rot_double_gyre, vortex.barriers[1], tspan) - @test flowbarrier isa Vector{EllipticBarrier} + @test flowbarrier isa Vector{CS.EllipticBarrier} @test length(flowbarrier) == length(tspan) fgvortex = flowgrow(rot_double_gyre, vortex, tspan, FlowGrowParams(maxdist=0.1, mindist=0.01)) - @test fgvortex isa Vector{EllipticVortex} + @test fgvortex isa Vector{CS.EllipticVortex} @test length(fgvortex) == length(tspan) fgbarrier = flowgrow(rot_double_gyre, vortex.barriers[1], tspan, FlowGrowParams()) - @test fgbarrier isa Vector{EllipticBarrier} + @test fgbarrier isa Vector{CS.EllipticBarrier} @test length(fgbarrier) == length(tspan) @test area(vortex) == area(vortex.barriers[end]) @test sum(map(v -> length(v.barriers), vortices)) == 2 @@ -145,7 +145,7 @@ end mhs = () end p = @inferred LCSParameters(1.0, 3*max(step(xspan), step(yspan)), mhs, 60, 0.5, 1.5, 1e-4) - @inferred critical_point_detection(q, p.indexradius; merge_heuristics=mhs) + @inferred CS.critical_point_detection(q, p.indexradius; merge_heuristics=mhs) constrainedLCS(q, p; verbose=false, debug=true) vortices, singularities = @inferred constrainedLCS(q, p; verbose=false) @@ -172,8 +172,8 @@ end square = SVector{2}.(vcat(vcat.(1, dense), vcat.(sparse, 1), vcat.(-1, sparse), vcat.(dense, -1)) .+ Ref(ones(2))) @test area(square) ≈ 4 rtol=5eps() @test centroid(square) ≈ ones(2) atol=5eps() - barrier = EllipticBarrier(circle, SVector{2}(0.0, 0.0), 1.0, true) - vortices = EllipticVortex(SVector{2}(0.0, 0.0), [barrier]) + barrier = CS.EllipticBarrier(circle, SVector{2}(0.0, 0.0), 1.0, true) + vortices = CS.EllipticVortex(SVector{2}(0.0, 0.0), [barrier]) LL, UR = extrema(barrier) @test area(vortices) == area(barrier) == area(circle) @test centroid(vortices) == centroid(barrier) == centroid(circle) @@ -183,7 +183,7 @@ end @test !clockwise(barrier, ODEFunction((u, p, t) -> SVector{2}(-u[2], u[1])), 0) @test clockwise(barrier, ODEFunction((u, p, t) -> SVector{2}(-u[2], u[1])), 0) == clockwise(vortices, ODEFunction((u, p, t) -> SVector{2}(-u[2], u[1])), 0) - barrier = EllipticBarrier(square, SVector{2}(1.0, 1.0), 1.0, true) + barrier = CS.EllipticBarrier(square, SVector{2}(1.0, 1.0), 1.0, true) LL, UR = extrema(barrier) @test LL ≈ zeros(2) @test UR ≈ 2ones(2) diff --git a/test/test_fem.jl b/test/test_fem.jl index a166e310..d3da90d3 100644 --- a/test/test_fem.jl +++ b/test/test_fem.jl 
@@ -1,4 +1,4 @@
-using CoherentStructures, Test, Tensors, Arpack, Random
+using CoherentStructures, Test, Tensors, Arpack, Random, Distances

 @testset "1d grid piecewise_linear" begin
     ctx, _ = @inferred regular1dP1Grid(3)
@@ -79,8 +79,8 @@ end
     # TODO: include (regular1dPCGrid, 1e-1),
     for grid in (regular1dP1Grid, regular1dP2Grid)
         ctx, _ = @inferred grid(200)
-        M = @inferred assembleMassMatrix(ctx)
-        K = @inferred assembleStiffnessMatrix(ctx)
+        M = @inferred assemble(Mass(), ctx)
+        K = @inferred assemble(Stiffness(), ctx)
         λ, v = eigs(-K, M, which = :SM, nev = 6)
         @test λ[1] ≈ 0 atol = √eps()
         @test λ[2:end] ≈ λᵢ[2:end] rtol = 1e-3
@@ -92,8 +92,8 @@ end
     # TODO: include (regular1dPCGrid, 1e-1),
     for (grid, tol) in ((regular1dP1Grid, 1e-3), (regular1dP2Grid, 1e-7))
         ctx, _ = @inferred grid(200)
-        M = @inferred assembleMassMatrix(ctx; bdata = getHomDBCS(ctx))
-        K = @inferred assembleStiffnessMatrix(ctx; bdata = getHomDBCS(ctx))
+        M = @inferred assemble(Mass(), ctx; bdata = getHomDBCS(ctx))
+        K = @inferred assemble(Stiffness(), ctx; bdata = getHomDBCS(ctx))
         λ, v = eigs(-K, M, which = :SM, nev = 6)
         @test λ ≈ λᵢ rtol = 1e-3
     end
@@ -102,7 +102,7 @@ end
 @testset "1d non-adaptive TO" begin
     for grid in (regular1dP1Grid, regular1dP2Grid)
         ctx, _ = grid(200)
-        bdata = BoundaryData(ctx, PEuclidean([1.0]))
+        bdata = BoundaryData(ctx, PeriodicEuclidean([1.0]))
         finv = x -> Base.mod.(x .- √2, 1.0)
         ALPHAS, _ = nonAdaptiveTOCollocation(
             ctx,
@@ -141,8 +141,8 @@ end

     #ctx, _ = regularP2TriangularGrid((50,50),LL,UR)
     #bdata = BoundaryData(ctx, PeriodicEuclidean([2π,2π]))
-    M = assembleMassMatrix(ctx, bdata = bdata)
-    S = assembleStiffnessMatrix(ctx, bdata = bdata)
+    M = assemble(Mass(), ctx, bdata = bdata)
+    S = assemble(Stiffness(), ctx, bdata = bdata)
     T = adaptiveTOCollocationStiffnessMatrix(
         ctx,
         standardMap;
diff --git a/test/test_odesolvers.jl b/test/test_odesolvers.jl
index da7ebab1..e4ee06df 100644
--- a/test/test_odesolvers.jl
+++ b/test/test_odesolvers.jl
@@ -54,8 +54,8 @@ using OrdinaryDiffEq, DiffEqDevTools, SparseArrays, LinearAlgebra
     ctx, _ = regularTriangularGrid((N, N))
     circleFun = x -> (sqrt((x[1] - 0.5)^2 + (x[2] - 0.5)^2) < 0.1) ? 1.0 : 0.0
     sol = CoherentStructures.advect_serialized_quadpoints(ctx, (0.0, 1.1), rot_double_gyre!, nothing, δ)
-    M = assembleMassMatrix(ctx)
-    A = assembleStiffnessMatrix(ctx)
+    M = assemble(Mass(), ctx)
+    A = assemble(Stiffness(), ctx)
     function update_coeffs!(A, u, p, t)
         vals = SparseArrays.nzvalview(A)