diff --git a/.travis.yml b/.travis.yml
index bba4f55327..48aa742722 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -31,6 +31,16 @@ matrix:
         - CC=gcc-4.8
         - CXX=g++-4.8
         - TENSORFLOW_VERSION=1.14
+    - python: 3.6
+      env:
+        - CC=gcc-5
+        - CXX=g++-5
+        - TENSORFLOW_VERSION=1.14
+    - python: 3.6
+      env:
+        - CC=gcc-8
+        - CXX=g++-8
+        - TENSORFLOW_VERSION=1.14
     - python: 3.7
       env:
         - CC=gcc-5
diff --git a/README.md b/README.md
index a5e11b4a17..59965dc881 100644
--- a/README.md
+++ b/README.md
@@ -9,8 +9,11 @@
   - [License and credits](#license-and-credits)
   - [Deep Potential in a nutshell](#deep-potential-in-a-nutshell)
 - [Download and install](#download-and-install)
+  - [Easy installation methods](#easy-installation-methods)
+    - [With Docker](#with-docker)
+    - [With conda](#with-conda)
+    - [Offline packages](#offline-packages)
   - [Install the python interaction](#install-the-python-interface)
-    - [Easy installation methods](#easy-installation-methods)
     - [Install the Tensorflow's python interface](#install-the-tensorflows-python-interface)
     - [Install the DeePMD-kit's python interface](#install-the-deepmd-kits-python-interface)
   - [Install the C++ interface](#install-the-c-interface)
@@ -83,11 +86,29 @@ In addition to building up potential energy models, DeePMD-kit can also be used
 Please follow our [github](https://github.com/deepmodeling/deepmd-kit) webpage to see the latest released version and development version.
 
-## Install the python interface
+## Easy installation methods
+There are various easy methods to install DeePMD-kit. Choose the one that you prefer. If you want to build it yourself, jump to the next two sections.
+
+### With Docker
+A Docker image for installing DeePMD-kit on CentOS 7 is available [here](https://github.com/frankhan91/deepmd-kit_docker).
+
+### With conda
+DeePMD-kit is available with [conda](https://github.com/conda/conda). Install [Anaconda](https://www.anaconda.com/distribution/#download-section) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html) first.
+
+To install the CPU version:
+```bash
+conda install deepmd-kit=*=*cpu lammps-dp=*=*cpu -c deepmodeling
+```
+
+To install the GPU version, built against [CUDA 10.0](https://docs.nvidia.com/deploy/cuda-compatibility/index.html#binary-compatibility__table-toolkit-driver):
+```bash
+conda install deepmd-kit=*=*gpu lammps-dp=*=*gpu -c deepmodeling
+```
 
-### Easy installation methods
-A docker for installing the DeePMD-kit on CentOS 7 is available [here](https://github.com/frankhan91/deepmd-kit_docker). We are currently working on installation methods using the `conda` package management system and `pip` tools. Hope these will come out soon.
+### Offline packages
+Offline packages for both the CPU and GPU versions are available on [the Releases page](https://github.com/deepmodeling/deepmd-kit/releases).
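Whichever of the above channels is used, a quick smoke test confirms that the installed Python interface is importable and can evaluate a model. The sketch below is our illustration, not part of the patch: `graph.pb` stands in for any frozen model at hand, `DeepPot` is the class exported by `deepmd/__init__.py`, and the `eval` call signature shown is assumed for this release line.

```python
# Post-install smoke test (a sketch; assumes a frozen model file graph.pb
# and a toy 3-atom configuration with two atom types).
import numpy as np
from deepmd import DeepPot  # exported by deepmd/__init__.py

dp = DeepPot('graph.pb')
coord = np.random.rand(1, 3 * 3)         # 1 frame, 3 atoms, flattened xyz
cell = (10.0 * np.eye(3)).reshape(1, 9)  # cubic box, flattened row-major
atype = [0, 1, 1]                        # atom types, counted from 0
e, f, v = dp.eval(coord, cell, atype)    # energy, force, virial
```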
+## Install the python interface
 ### Install the Tensorflow's python interface
 First, check the python version and compiler version on your machine
 ```bash
@@ -102,6 +123,14 @@
 source $tensorflow_venv/bin/activate
 pip install --upgrade pip
 pip install --upgrade tensorflow==1.14.0
 ```
+Note that every time a new shell is started and one wants to use `DeePMD-kit`, the virtual environment should be activated by
+```bash
+source $tensorflow_venv/bin/activate
+```
+To exit the virtual environment, one can run
+```bash
+deactivate
+```
 If one has multiple python interpreters named like python3.x, it can be specified by, for example
 ```bash
 virtualenv -p python3.7 $tensorflow_venv
 ```
@@ -483,7 +512,7 @@ Running an MD simulation with LAMMPS is simpler. In the LAMMPS input file, one n
 ```bash
 pair_style	deepmd graph.pb
 pair_coeff
 ```
-where `graph.pb` is the file name of the frozen model. The `pair_coeff` should be left blank. It should be noted that LAMMPS counts atom types starting from 1, therefore, all LAMMPS atom type will be firstly subtracted by 1, and then passed into the DeePMD-kit engine to compute the interactions. A detailed documentation of this pair style is [here](doc/lammps-pair-style-deepmd.md).
+where `graph.pb` is the file name of the frozen model. The `pair_coeff` should be left blank. It should be noted that LAMMPS counts atom types starting from 1; therefore, all LAMMPS atom types will first be subtracted by 1 and then passed into the DeePMD-kit engine to compute the interactions. [A detailed documentation of this pair style](doc/lammps-pair-style-deepmd.md) is available.
 
 ### Long-range interaction
 The reciprocal space part of the long-range interaction can be calculated by LAMMPS command `kspace_style`. To use it with DeePMD-kit, one writes
@@ -533,11 +562,7 @@ If other unexpected problems occur, you're welcome to contact us for help.
 
 When the version of DeePMD-kit used to training model is different from the that of DeePMD-kit running MDs, one has the problem of model compatability.
 
-DeePMD-kit guarantees that the codes with the same major and minor revisions are compatible. That is to say v0.12.5 is compatible to v0.12.0, but is not compatible to v0.11.0. When way of fixing it is to restart the training with the new revisions and a slightly increased `stop_batch`, say 1,000,000 to 1,001,000 if the `save_freq` was set to 1,000. Typically one runs
-```bash
-dp train --restart model.ckpt revised_input.json
-```
-and freeze the new model.
+DeePMD-kit guarantees that the codes with the same major and minor revisions are compatible. That is to say, v0.12.5 is compatible with v0.12.0, but is not compatible with v0.11.0 nor v1.0.0.
 
 ## Installation: inadequate versions of gcc/g++
 Sometimes you may use a gcc/g++ of version <4.9.
If you have a gcc/g++ of version > 4.9, say, 7.2.0, you may choose to use it by doing
diff --git a/deepmd/__init__.py b/deepmd/__init__.py
index 5d656dc0d4..b2711088e8 100644
--- a/deepmd/__init__.py
+++ b/deepmd/__init__.py
@@ -1,8 +1,9 @@
 from .env import set_mkl
-from .DeepEval import DeepEval
-from .DeepPot import DeepPot
-from .DeepPolar import DeepPolar
-from .DeepWFC import DeepWFC
+from .DeepEval   import DeepEval
+from .DeepPot    import DeepPot
+from .DeepDipole import DeepDipole
+from .DeepPolar  import DeepPolar
+from .DeepWFC    import DeepWFC
 
 set_mkl()
diff --git a/doc/lammps-pair-style-deepmd.md b/doc/lammps-pair-style-deepmd.md
index cc8d9724df..0a39e7362c 100644
--- a/doc/lammps-pair-style-deepmd.md
+++ b/doc/lammps-pair-style-deepmd.md
@@ -35,13 +35,13 @@ This pair style takes the deep potential defined in a model file that usually ha
 The model deviation evalulate the consistency of the force predictions from multiple models. By default, only the maximal, minimal and averge model deviations are output. If the key `atomic` is set, then the model deviation of force prediction of each atom will be output.
 
-By default, the model deviation is output in absolute value. If the keyword `relative` is set, then the relative model deviation will be output, which is defined by
+By default, the model deviation is output in absolute value. If the keyword `relative` is set, then the relative model deviation will be output. The relative model deviation of the force on atom `i` is defined by
 ```math
-          |Df|
-Ef = -------------
-      |f| + level
+          |Df_i|
+Ef_i = -------------
+       |f_i| + level
 ```
-where `Df` is the model deviation of a force, `|f|` is the norm of the force and `level` is provided as the parameter of the keyword `relative`.
+where `Df_i` is the absolute model deviation of the force on atom `i`, `|f_i|` is the norm of the force and `level` is provided as the parameter of the keyword `relative`.
 
 ## Restrictions
@@ -50,6 +50,8 @@ where `Df` is the model deviation of a force, `|f|` is the norm of the force and
 
 - The `atom_style` of the system should be `atomic`.
 
+- When the `atomic` keyword of `deepmd` is set, one should not use this pair style with MPI parallelization.
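As a concrete reading of the relative model deviation defined above, the sketch below computes `Ef_i` for every atom from an ensemble of force predictions. Treating `Df_i` as the root-mean-square spread of the force across the models is our assumption for illustration; the patch itself does not spell out the aggregation.

```python
import numpy as np

def relative_model_deviation(forces, level):
    # forces: (n_models, n_atoms, 3) force predictions from the model ensemble
    # level:  the parameter supplied to the `relative` keyword
    mean_f = forces.mean(axis=0)                    # ensemble-mean force
    # |Df_i|: RMS spread of the force on atom i across models (assumed)
    df = np.sqrt(((forces - mean_f) ** 2).sum(axis=2).mean(axis=0))
    fnorm = np.linalg.norm(mean_f, axis=1)          # |f_i|
    return df / (fnorm + level)                     # Ef_i, shape (n_atoms,)
```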
+ [DP]:https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.143001 [DP-SE]:https://arxiv.org/abs/1805.09003 diff --git a/examples/fparam/data/.gitignore b/examples/fparam/data/.gitignore index b440c7f944..3ce13d81e4 100644 --- a/examples/fparam/data/.gitignore +++ b/examples/fparam/data/.gitignore @@ -1,2 +1,2 @@ -*raw - +*.raw +convert_aparam.py diff --git a/examples/fparam/data/e3000_i2000/set.000/aparam.npy b/examples/fparam/data/e3000_i2000/set.000/aparam.npy new file mode 100644 index 0000000000..7c10e752b4 Binary files /dev/null and b/examples/fparam/data/e3000_i2000/set.000/aparam.npy differ diff --git a/examples/fparam/data/e3000_i2000/set.001/aparam.npy b/examples/fparam/data/e3000_i2000/set.001/aparam.npy new file mode 100644 index 0000000000..7c10e752b4 Binary files /dev/null and b/examples/fparam/data/e3000_i2000/set.001/aparam.npy differ diff --git a/examples/fparam/data/e3000_i2000/set.002/aparam.npy b/examples/fparam/data/e3000_i2000/set.002/aparam.npy new file mode 100644 index 0000000000..7c10e752b4 Binary files /dev/null and b/examples/fparam/data/e3000_i2000/set.002/aparam.npy differ diff --git a/examples/fparam/data/e8000_i2000/set.000/aparam.npy b/examples/fparam/data/e8000_i2000/set.000/aparam.npy new file mode 100644 index 0000000000..7eff3edb7a Binary files /dev/null and b/examples/fparam/data/e8000_i2000/set.000/aparam.npy differ diff --git a/examples/fparam/data/e8000_i2000/set.001/aparam.npy b/examples/fparam/data/e8000_i2000/set.001/aparam.npy new file mode 100644 index 0000000000..7eff3edb7a Binary files /dev/null and b/examples/fparam/data/e8000_i2000/set.001/aparam.npy differ diff --git a/examples/fparam/data/e8000_i2000/set.002/aparam.npy b/examples/fparam/data/e8000_i2000/set.002/aparam.npy new file mode 100644 index 0000000000..7eff3edb7a Binary files /dev/null and b/examples/fparam/data/e8000_i2000/set.002/aparam.npy differ diff --git a/examples/fparam/train/input.json b/examples/fparam/train/input.json index aa1498b06e..c57afdfb7f 100644 --- a/examples/fparam/train/input.json +++ b/examples/fparam/train/input.json @@ -1,6 +1,7 @@ { "_comment": " model parameters", "model" : { + "data_stat_nbatch": 1, "descriptor": { "type": "se_a", "sel": [60], diff --git a/examples/fparam/train/input_aparam.json b/examples/fparam/train/input_aparam.json new file mode 100644 index 0000000000..86be27ef29 --- /dev/null +++ b/examples/fparam/train/input_aparam.json @@ -0,0 +1,63 @@ +{ + "_comment": " model parameters", + "model" : { + "data_stat_nbatch": 1, + "descriptor": { + "type": "se_a", + "sel": [60], + "rcut_smth": 1.80, + "rcut": 6.00, + "neuron": [25, 50, 100], + "resnet_dt": false, + "axis_neuron": 8, + "seed": 1 + }, + "fitting_net" : { + "neuron": [120, 120, 120], + "resnet_dt": true, + "numb_aparam": 1, + "seed": 1 + } + }, + + "loss" : { + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + }, + + "learning_rate" : { + "start_lr": 0.001, + "decay_steps": 5000, + "decay_rate": 0.95 + }, + + "_comment": " traing controls", + "training" : { + "systems": ["../data/e3000_i2000/", "../data/e8000_i2000/"], + "set_prefix": "set", + "stop_batch": 1000000, + "batch_size": 1, + + "seed": 1, + + "_comment": " display and restart", + "_comment": " frequencies counted in batch", + "disp_file": "lcurve.out", + "disp_freq": 100, + "numb_test": 10, + "save_freq": 1000, + "save_ckpt": "model.ckpt", + "load_ckpt": "model.ckpt", + "disp_training":true, + "time_training":true, + "profiling": 
false, + "profiling_file": "timeline.json" + }, + + "_comment": "that's all" +} + diff --git a/examples/water/train/polar.json b/examples/water/train/polar.json index d28f2b19f3..6a1558b124 100644 --- a/examples/water/train/polar.json +++ b/examples/water/train/polar.json @@ -3,6 +3,7 @@ "_comment": " model parameters", "model":{ "type_map": ["O", "H"], + "data_stat_nbatch": 1, "descriptor": { "type": "loc_frame", "sel_a": [16, 32], diff --git a/examples/water/train/polar_se_a.json b/examples/water/train/polar_se_a.json index bd816462d4..55899e564d 100644 --- a/examples/water/train/polar_se_a.json +++ b/examples/water/train/polar_se_a.json @@ -3,6 +3,7 @@ "_comment": " model parameters", "model":{ "type_map": ["O", "H"], + "data_stat_nbatch": 1, "descriptor" :{ "type": "se_a", "sel": [46, 92], @@ -16,7 +17,8 @@ }, "fitting_net": { "type": "polar", - "pol_type": [0], + "sel_type": [0], + "fit_diag": true, "neuron": [100, 100, 100], "resnet_dt": true, "seed": 1, @@ -27,7 +29,7 @@ "learning_rate" :{ "type": "exp", - "start_lr": 0.001, + "start_lr": 0.01, "decay_steps": 5000, "decay_rate": 0.95, "_comment": "that's all" diff --git a/examples/water/train/wannier.json b/examples/water/train/wannier.json index 251dd51803..e969675989 100644 --- a/examples/water/train/wannier.json +++ b/examples/water/train/wannier.json @@ -3,6 +3,7 @@ "_comment": " model parameters", "model":{ "type_map": ["O", "H"], + "data_stat_nbatch": 1, "descriptor": { "type": "loc_frame", "sel_a": [16, 32], diff --git a/pyproject.toml b/pyproject.toml index 9397654113..b1251c5890 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,3 @@ [build-system] -requires = ["setuptools", "wheel", "scikit-build", "cmake", "ninja"] +requires = ["setuptools", "wheel", "scikit-build", "cmake", "ninja", "m2r"] diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt index ac41dae420..7b35067444 100644 --- a/source/CMakeLists.txt +++ b/source/CMakeLists.txt @@ -105,7 +105,11 @@ endif () # define USE_CUDA_TOOLKIT if (DEFINED USE_CUDA_TOOLKIT) - find_package(CUDA REQUIRED) + if (USE_CUDA_TOOLKIT) + find_package(CUDA REQUIRED) + else() + message(STATUS "Will not build nv GPU support") + endif() else() find_package(CUDA QUIET) if (CUDA_FOUND) @@ -120,6 +124,15 @@ if (USE_CUDA_TOOLKIT) add_definitions("-DUSE_CUDA_TOOLKIT") endif() +# define USE_TTM +if (NOT DEFINED USE_TTM) + set(USE_TTM FALSE) +endif (NOT DEFINED USE_TTM) +if (USE_TTM) + message(STATUS "Use TTM") + set(TTM_DEF "-DUSE_TTM") +endif (USE_TTM) + # define build type if ((NOT DEFINED CMAKE_BUILD_TYPE) OR CMAKE_BUILD_TYPE STREQUAL "") set (CMAKE_BUILD_TYPE release) diff --git a/source/lib/include/NNPInter.h b/source/lib/include/NNPInter.h index 08617a187f..a06faaa3b9 100644 --- a/source/lib/include/NNPInter.h +++ b/source/lib/include/NNPInter.h @@ -64,7 +64,8 @@ class NNPInter const vector & atype, const vector & box, const int nghost = 0, - const vector fparam = vector()); + const vector & fparam = vector(), + const vector & aparam = vector()); void compute (ENERGYTYPE & ener, vector & force, vector & virial, @@ -73,7 +74,8 @@ class NNPInter const vector & box, const int nghost, const LammpsNeighborList & lmp_list, - const vector fparam = vector()); + const vector & fparam = vector(), + const vector & aparam = vector()); void compute (ENERGYTYPE & ener, vector & force, vector & virial, @@ -82,7 +84,8 @@ class NNPInter const vector & coord, const vector & atype, const vector & box, - const vector fparam = vector()); + const vector & fparam = vector(), + const vector & aparam 
= vector()); void compute (ENERGYTYPE & ener, vector & force, vector & virial, @@ -93,10 +96,12 @@ class NNPInter const vector & box, const int nghost, const LammpsNeighborList & lmp_list, - const vector fparam = vector()); + const vector & fparam = vector(), + const vector & aparam = vector()); VALUETYPE cutoff () const {assert(inited); return rcut;}; int numb_types () const {assert(inited); return ntypes;}; int dim_fparam () const {assert(inited); return dfparam;}; + int dim_aparam () const {assert(inited); return daparam;}; private: Session* session; int num_intra_nthreads, num_inter_nthreads; @@ -109,6 +114,10 @@ class NNPInter VALUETYPE cell_size; int ntypes; int dfparam; + int daparam; + void validate_fparam_aparam(const int & nloc, + const vector &fparam, + const vector &aparam)const ; }; class NNPInterModelDevi @@ -125,7 +134,8 @@ class NNPInterModelDevi const vector & coord, const vector & atype, const vector & box, - const vector fparam = vector()); + const vector & fparam = vector(), + const vector & aparam = vector()); void compute (vector & all_ener, vector > & all_force, vector > & all_virial, @@ -134,7 +144,8 @@ class NNPInterModelDevi const vector & box, const int nghost, const LammpsNeighborList & lmp_list, - const vector fparam = vector()); + const vector & fparam = vector(), + const vector & aparam = vector()); void compute (vector & all_ener, vector > & all_force, vector > & all_virial, @@ -145,10 +156,12 @@ class NNPInterModelDevi const vector & box, const int nghost, const LammpsNeighborList & lmp_list, - const vector fparam = vector()); + const vector & fparam = vector(), + const vector & aparam = vector()); VALUETYPE cutoff () const {assert(inited); return rcut;}; int numb_types () const {assert(inited); return ntypes;}; int dim_fparam () const {assert(inited); return dfparam;}; + int dim_aparam () const {assert(inited); return daparam;}; #ifndef HIGH_PREC void compute_avg (ENERGYTYPE & dener, const vector & all_energy); @@ -176,6 +189,10 @@ class NNPInterModelDevi VALUETYPE cell_size; int ntypes; int dfparam; + int daparam; + void validate_fparam_aparam(const int & nloc, + const vector &fparam, + const vector &aparam)const ; }; diff --git a/source/lib/src/NNPInter.cc b/source/lib/src/NNPInter.cc index 385aa063d8..fa1f989b49 100644 --- a/source/lib/src/NNPInter.cc +++ b/source/lib/src/NNPInter.cc @@ -66,7 +66,8 @@ make_input_tensors (std::vector> & input_tensors, const vector & datype_, const vector & dbox, const VALUETYPE & cell_size, - const vector fparam_, + const vector & fparam_, + const vector & aparam_, const NNPAtomMap&nnpmap, const int nghost = 0) { @@ -127,15 +128,20 @@ make_input_tensors (std::vector> & input_tensors, TensorShape fparam_shape ; fparam_shape.AddDim (nframes); fparam_shape.AddDim (fparam_.size()); + TensorShape aparam_shape ; + aparam_shape.AddDim (nframes); + aparam_shape.AddDim (aparam_.size()); #ifdef HIGH_PREC Tensor coord_tensor (DT_DOUBLE, coord_shape); Tensor box_tensor (DT_DOUBLE, box_shape); Tensor fparam_tensor (DT_DOUBLE, fparam_shape); + Tensor aparam_tensor (DT_DOUBLE, aparam_shape); #else Tensor coord_tensor (DT_FLOAT, coord_shape); Tensor box_tensor (DT_FLOAT, box_shape); Tensor fparam_tensor (DT_FLOAT, fparam_shape); + Tensor aparam_tensor (DT_FLOAT, aparam_shape); #endif Tensor type_tensor (DT_INT32, type_shape); Tensor mesh_tensor (DT_INT32, mesh_shape); @@ -147,6 +153,7 @@ make_input_tensors (std::vector> & input_tensors, auto mesh = mesh_tensor.flat (); auto natoms = natoms_tensor.flat (); auto fparam = 
fparam_tensor.matrix (); + auto aparam = aparam_tensor.matrix (); vector dcoord (dcoord_); nnpmap.forward (dcoord.begin(), dcoord_.begin(), 3); @@ -164,6 +171,9 @@ make_input_tensors (std::vector> & input_tensors, for (int jj = 0; jj < fparam_.size(); ++jj){ fparam(ii, jj) = fparam_[jj]; } + for (int jj = 0; jj < aparam_.size(); ++jj){ + aparam(ii, jj) = aparam_[jj]; + } } mesh (1-1) = 0; mesh (2-1) = 0; @@ -183,26 +193,19 @@ make_input_tensors (std::vector> & input_tensors, natoms (1) = nall; for (int ii = 0; ii < ntypes; ++ii) natoms(ii+2) = type_count[ii]; - if (fparam_.size() == 0) { - input_tensors = { - {"t_coord", coord_tensor}, - {"t_type", type_tensor}, - {"t_box", box_tensor}, - {"t_mesh", mesh_tensor}, - {"t_natoms", natoms_tensor}, - }; + input_tensors = { + {"t_coord", coord_tensor}, + {"t_type", type_tensor}, + {"t_box", box_tensor}, + {"t_mesh", mesh_tensor}, + {"t_natoms",natoms_tensor}, + }; + if (fparam_.size() > 0) { + input_tensors.push_back({"t_fparam", fparam_tensor}); } - else { - input_tensors = { - {"t_coord", coord_tensor}, - {"t_type", type_tensor}, - {"t_box", box_tensor}, - {"t_mesh", mesh_tensor}, - {"t_natoms", natoms_tensor}, - {"t_fparam", fparam_tensor}, - }; + if (aparam_.size() > 0) { + input_tensors.push_back({"t_aparam", aparam_tensor}); } - return nloc; } @@ -213,7 +216,8 @@ make_input_tensors (std::vector> & input_tensors, const vector & datype_, const vector & dbox, InternalNeighborList & dlist, - const vector fparam_, + const vector & fparam_, + const vector & aparam_, const NNPAtomMap&nnpmap, const int nghost) { @@ -247,15 +251,20 @@ make_input_tensors (std::vector> & input_tensors, TensorShape fparam_shape ; fparam_shape.AddDim (nframes); fparam_shape.AddDim (fparam_.size()); + TensorShape aparam_shape ; + aparam_shape.AddDim (nframes); + aparam_shape.AddDim (aparam_.size()); #ifdef HIGH_PREC Tensor coord_tensor (DT_DOUBLE, coord_shape); Tensor box_tensor (DT_DOUBLE, box_shape); Tensor fparam_tensor (DT_DOUBLE, fparam_shape); + Tensor aparam_tensor (DT_DOUBLE, aparam_shape); #else Tensor coord_tensor (DT_FLOAT, coord_shape); Tensor box_tensor (DT_FLOAT, box_shape); Tensor fparam_tensor (DT_FLOAT, fparam_shape); + Tensor aparam_tensor (DT_FLOAT, aparam_shape); #endif Tensor type_tensor (DT_INT32, type_shape); Tensor mesh_tensor (DT_INT32, mesh_shape); @@ -267,6 +276,7 @@ make_input_tensors (std::vector> & input_tensors, auto mesh = mesh_tensor.flat (); auto natoms = natoms_tensor.flat (); auto fparam = fparam_tensor.matrix (); + auto aparam = aparam_tensor.matrix (); vector dcoord (dcoord_); nnpmap.forward (dcoord.begin(), dcoord_.begin(), 3); @@ -284,6 +294,9 @@ make_input_tensors (std::vector> & input_tensors, for (int jj = 0; jj < fparam_.size(); ++jj){ fparam(ii, jj) = fparam_[jj]; } + for (int jj = 0; jj < aparam_.size(); ++jj){ + aparam(ii, jj) = aparam_[jj]; + } } for (int ii = 0; ii < 16; ++ii) mesh(ii) = 0; @@ -303,24 +316,18 @@ make_input_tensors (std::vector> & input_tensors, natoms (1) = nall; for (int ii = 0; ii < ntypes; ++ii) natoms(ii+2) = type_count[ii]; - if (fparam_.size() == 0) { - input_tensors = { - {"t_coord", coord_tensor}, - {"t_type", type_tensor}, - {"t_box", box_tensor}, - {"t_mesh", mesh_tensor}, - {"t_natoms", natoms_tensor}, - }; + input_tensors = { + {"t_coord", coord_tensor}, + {"t_type", type_tensor}, + {"t_box", box_tensor}, + {"t_mesh", mesh_tensor}, + {"t_natoms", natoms_tensor}, + }; + if (fparam_.size() > 0) { + input_tensors.push_back({"t_fparam", fparam_tensor}); } - else { - input_tensors = { - {"t_coord", 
coord_tensor}, - {"t_type", type_tensor}, - {"t_box", box_tensor}, - {"t_mesh", mesh_tensor}, - {"t_natoms", natoms_tensor}, - {"t_fparam", fparam_tensor}, - }; + if (aparam_.size() > 0) { + input_tensors.push_back({"t_aparam", aparam_tensor}); } return nloc; @@ -520,9 +527,11 @@ init (const string & model, const int & gpu_rank) cell_size = rcut; ntypes = get_scalar("descrpt_attr/ntypes"); dfparam = get_scalar("fitting_attr/dfparam"); + daparam = get_scalar("fitting_attr/daparam"); assert(rcut == get_rcut()); assert(ntypes == get_ntypes()); if (dfparam < 0) dfparam = 0; + if (daparam < 0) daparam = 0; inited = true; } #else @@ -541,9 +550,11 @@ init (const string & model, const int & gpu_rank) cell_size = rcut; ntypes = get_scalar("descrpt_attr/ntypes"); dfparam = get_scalar("fitting_attr/dfparam"); + daparam = get_scalar("fitting_attr/daparam"); assert(rcut == get_rcut()); assert(ntypes == get_ntypes()); if (dfparam < 0) dfparam = 0; + if (daparam < 0) daparam = 0; // rcut = get_rcut(); // cell_size = rcut; // ntypes = get_ntypes(); @@ -583,6 +594,20 @@ get_scalar (const string & name) const return orc(0); } +void +NNPInter:: +validate_fparam_aparam(const int & nloc, + const vector &fparam, + const vector &aparam)const +{ + if (fparam.size() != dfparam) { + throw std::runtime_error("the dim of frame parameter provided is not consistent with what the model uses"); + } + if (aparam.size() != daparam * nloc) { + throw std::runtime_error("the dim of atom parameter provided is not consistent with what the model uses"); + } +} + void NNPInter:: compute (ENERGYTYPE & dener, @@ -592,18 +617,17 @@ compute (ENERGYTYPE & dener, const vector & datype_, const vector & dbox, const int nghost, - const vector fparam) + const vector & fparam, + const vector & aparam) { int nall = dcoord_.size() / 3; int nloc = nall - nghost; NNPAtomMap nnpmap (datype_.begin(), datype_.begin() + nloc); assert (nloc == nnpmap.get_type().size()); - if (fparam.size() != dfparam) { - throw std::runtime_error("the dim of frame parameter provided is not consistent with what the model uses"); - } + validate_fparam_aparam(nloc, fparam, aparam); std::vector> input_tensors; - int ret = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, cell_size, fparam, nnpmap, nghost); + int ret = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, cell_size, fparam, aparam, nnpmap, nghost); assert (ret == nloc); run_model (dener, dforce_, dvirial, session, input_tensors, nnpmap, nghost); @@ -619,22 +643,21 @@ compute (ENERGYTYPE & dener, const vector & dbox, const int nghost, const LammpsNeighborList & lmp_list, - const vector fparam) + const vector & fparam, + const vector & aparam) { int nall = dcoord_.size() / 3; int nloc = nall - nghost; NNPAtomMap nnpmap (datype_.begin(), datype_.begin() + nloc); assert (nloc == nnpmap.get_type().size()); - if (fparam.size() != dfparam) { - throw std::runtime_error("the dim of frame parameter provided is not consistent with what the model uses"); - } + validate_fparam_aparam(nloc, fparam, aparam); InternalNeighborList nlist; convert_nlist_lmp_internal (nlist, lmp_list); shuffle_nlist (nlist, nnpmap); std::vector> input_tensors; - int ret = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, nnpmap, nghost); + int ret = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, aparam, nnpmap, nghost); assert (nloc == ret); run_model (dener, dforce_, dvirial, session, input_tensors, nnpmap, nghost); @@ -651,15 +674,14 @@ 
compute (ENERGYTYPE & dener, const vector & dcoord_, const vector & datype_, const vector & dbox, - const vector fparam) + const vector & fparam, + const vector & aparam) { NNPAtomMap nnpmap (datype_.begin(), datype_.end()); - if (fparam.size() != dfparam) { - throw std::runtime_error("the dim of frame parameter provided is not consistent with what the model uses"); - } + validate_fparam_aparam(nnpmap.get_type().size(), fparam, aparam); std::vector> input_tensors; - int nloc = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, cell_size, fparam, nnpmap); + int nloc = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, cell_size, fparam, aparam, nnpmap); run_model (dener, dforce_, dvirial, datom_energy_, datom_virial_, session, input_tensors, nnpmap); } @@ -678,22 +700,21 @@ compute (ENERGYTYPE & dener, const vector & dbox, const int nghost, const LammpsNeighborList & lmp_list, - const vector fparam) + const vector & fparam, + const vector & aparam) { int nall = dcoord_.size() / 3; int nloc = nall - nghost; NNPAtomMap nnpmap (datype_.begin(), datype_.begin() + nloc); assert (nloc == nnpmap.get_type().size()); - if (fparam.size() != dfparam) { - throw std::runtime_error("the dim of frame parameter provided is not consistent with what the model uses"); - } + validate_fparam_aparam(nloc, fparam, aparam); InternalNeighborList nlist; convert_nlist_lmp_internal (nlist, lmp_list); shuffle_nlist (nlist, nnpmap); std::vector> input_tensors; - int ret = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, nnpmap, nghost); + int ret = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, aparam, nnpmap, nghost); assert (nloc == ret); run_model (dener, dforce_, dvirial, datom_energy_, datom_virial_, session, input_tensors, nnpmap, nghost); @@ -756,7 +777,9 @@ init (const vector & models, const int & gpu_rank) cell_size = rcut; ntypes = get_scalar("descrpt_attr/ntypes"); dfparam = get_scalar("fitting_attr/dfparam"); + daparam = get_scalar("fitting_attr/daparam"); if (dfparam < 0) dfparam = 0; + if (daparam < 0) daparam = 0; // rcut = get_rcut(); // cell_size = rcut; // ntypes = get_ntypes(); @@ -783,7 +806,9 @@ init (const vector & models, const int & gpu_rank) cell_size = rcut; ntypes = get_scalar("descrpt_attr/ntypes"); dfparam = get_scalar("fitting_attr/dfparam"); + daparam = get_scalar("fitting_attr/daparam"); if (dfparam < 0) dfparam = 0; + if (daparam < 0) daparam = 0; // rcut = get_rcut(); // cell_size = rcut; // ntypes = get_ntypes(); @@ -815,6 +840,20 @@ get_scalar(const string name) const return myrcut; } +void +NNPInterModelDevi:: +validate_fparam_aparam(const int & nloc, + const vector &fparam, + const vector &aparam)const +{ + if (fparam.size() != dfparam) { + throw std::runtime_error("the dim of frame parameter provided is not consistent with what the model uses"); + } + if (aparam.size() != daparam * nloc) { + throw std::runtime_error("the dim of atom parameter provided is not consistent with what the model uses"); + } +} + void NNPInterModelDevi:: compute (ENERGYTYPE & dener, @@ -824,17 +863,16 @@ compute (ENERGYTYPE & dener, const vector & dcoord_, const vector & datype_, const vector & dbox, - const vector fparam) + const vector & fparam, + const vector & aparam) { if (numb_models == 0) return; - if (fparam.size() != dfparam) { - throw std::runtime_error("the dim of frame parameter provided is not consistent with what the model uses"); - } NNPAtomMap nnpmap (datype_.begin(), datype_.end()); + 
validate_fparam_aparam(nnpmap.get_type().size(), fparam, aparam); std::vector> input_tensors; - int nloc = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, cell_size, fparam, nnpmap); + int nloc = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, cell_size, fparam, aparam, nnpmap); vector all_energy (numb_models); vector > all_force (numb_models); @@ -873,24 +911,22 @@ compute (vector & all_energy, const vector & dbox, const int nghost, const LammpsNeighborList & lmp_list, - const vector fparam) + const vector & fparam, + const vector & aparam) { if (numb_models == 0) return; - if (fparam.size() != dfparam) { - throw std::runtime_error("the dim of frame parameter provided is not consistent with what the model uses"); - } - int nall = dcoord_.size() / 3; int nloc = nall - nghost; NNPAtomMap nnpmap (datype_.begin(), datype_.begin() + nloc); assert (nloc == nnpmap.get_type().size()); + validate_fparam_aparam(nloc, fparam, aparam); InternalNeighborList nlist; convert_nlist_lmp_internal (nlist, lmp_list); shuffle_nlist (nlist, nnpmap); std::vector> input_tensors; - int ret = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, nnpmap, nghost); + int ret = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, aparam, nnpmap, nghost); assert (nloc == ret); all_energy.resize (numb_models); @@ -914,24 +950,22 @@ compute (vector & all_energy, const vector & dbox, const int nghost, const LammpsNeighborList & lmp_list, - const vector fparam) + const vector & fparam, + const vector & aparam) { if (numb_models == 0) return; - if (fparam.size() != dfparam) { - throw std::runtime_error("the dim of frame parameter provided is not consistent with what the model uses"); - } - int nall = dcoord_.size() / 3; int nloc = nall - nghost; NNPAtomMap nnpmap (datype_.begin(), datype_.begin() + nloc); assert (nloc == nnpmap.get_type().size()); + validate_fparam_aparam(nloc, fparam, aparam); InternalNeighborList nlist; convert_nlist_lmp_internal (nlist, lmp_list); shuffle_nlist (nlist, nnpmap); std::vector> input_tensors; - int ret = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, nnpmap, nghost); + int ret = make_input_tensors (input_tensors, dcoord_, ntypes, datype_, dbox, nlist, fparam, aparam, nnpmap, nghost); assert (nloc == ret); all_energy.resize (numb_models); diff --git a/source/lmp/deepmd_fix_ttm.patch b/source/lmp/deepmd_fix_ttm.patch new file mode 100644 index 0000000000..8ca19d388d --- /dev/null +++ b/source/lmp/deepmd_fix_ttm.patch @@ -0,0 +1,34 @@ +diff '--color=auto' -Naur lammps-16Mar18/src/USER-MISC/fix_ttm_mod.h lammps-16Mar18.new/src/USER-MISC/fix_ttm_mod.h +--- lammps-16Mar18/src/USER-MISC/fix_ttm_mod.h 2017-06-22 21:57:19.000000000 +0800 ++++ lammps-16Mar18.new/src/USER-MISC/fix_ttm_mod.h 2019-10-16 09:58:38.228813486 +0800 +@@ -21,6 +21,7 @@ + #define LMP_FIX_TTM_MOD_H + + #include "fix.h" ++#include + + namespace LAMMPS_NS { + +@@ -51,6 +52,22 @@ + double memory_usage(); + void grow_arrays(int); + double compute_vector(int); ++ ////////////////////////////////////////////////// ++ // added by deepmd-kit ++ std::vector get_nodes() const ++ { ++ std::vector tmp(3); ++ tmp[0] = nxnodes; ++ tmp[1] = nynodes; ++ tmp[2] = nznodes; ++ return tmp; ++ }; ++ double *** const get_T_electron()const ++ { ++ return T_electron; ++ } ++ // added by deepmd-kit ++ ////////////////////////////////////////////////// + + private: + int me; diff --git a/source/lmp/env.sh.in 
b/source/lmp/env.sh.in index 6f046938cf..9b7676fcd9 100644 --- a/source/lmp/env.sh.in +++ b/source/lmp/env.sh.in @@ -6,6 +6,6 @@ TF_INCLUDE_DIRS=`echo $TENSORFLOW_INCLUDE_DIRS | sed "s/;/ -I/g"` TF_LIBRARY_PATH=`echo $TENSORFLOW_LIBRARY_PATH | sed "s/;/ -L/g"` TF_RPATH=`echo $TENSORFLOW_LIBRARY_PATH | sed "s/;/ -Wl,-rpath=/g"` -NNP_INC=" -std=c++11 @PREC_DEF@ -I$TF_INCLUDE_DIRS -I$DEEPMD_ROOT/include/deepmd " +NNP_INC=" -std=c++11 @PREC_DEF@ @TTM_DEF@ -I$TF_INCLUDE_DIRS -I$DEEPMD_ROOT/include/deepmd " NNP_PATH=" -L$TF_LIBRARY_PATH -L$DEEPMD_ROOT/lib" NNP_LIB=" -Wl,--no-as-needed -l@LIB_DEEPMD_OP@ -l@LIB_DEEPMD@ -ltensorflow_cc -ltensorflow_framework -Wl,-rpath=$TF_RPATH -Wl,-rpath=$DEEPMD_ROOT/lib" diff --git a/source/lmp/pair_nnp.cpp b/source/lmp/pair_nnp.cpp index d1d7509ff0..372adcba77 100644 --- a/source/lmp/pair_nnp.cpp +++ b/source/lmp/pair_nnp.cpp @@ -12,6 +12,11 @@ #include "neighbor.h" #include "neigh_list.h" #include "neigh_request.h" +#include "modify.h" +#include "fix.h" +#ifdef USE_TTM +#include "fix_ttm_mod.h" +#endif #include "pair_nnp.h" @@ -95,6 +100,81 @@ ana_st (double & max, } } +static void +make_uniform_aparam( +#ifdef HIGH_PREC + vector & daparam, + const vector & aparam, + const int & nlocal +#else + vector & daparam, + const vector & aparam, + const int & nlocal +#endif + ) +{ + unsigned dim_aparam = aparam.size(); + daparam.resize(dim_aparam * nlocal); + for (int ii = 0; ii < nlocal; ++ii){ + for (int jj = 0; jj < dim_aparam; ++jj){ + daparam[ii*dim_aparam+jj] = aparam[jj]; + } + } +} + +#ifdef USE_TTM +void PairNNP::make_ttm_aparam( +#ifdef HIGH_PREC + vector & daparam +#else + vector & daparam +#endif + ) +{ + assert(do_ttm); + // get ttm_fix + const FixTTMMod * ttm_fix = NULL; + for (int ii = 0; ii < modify->nfix; ii++) { + if (string(modify->fix[ii]->id) == ttm_fix_id){ + ttm_fix = dynamic_cast(modify->fix[ii]); + } + } + assert(ttm_fix); + // modify + double **x = atom->x; + int *mask = atom->mask; + int nlocal = atom->nlocal; + vector nnodes = ttm_fix->get_nodes(); + int nxnodes = nnodes[0]; + int nynodes = nnodes[1]; + int nznodes = nnodes[2]; + double *** const T_electron = ttm_fix->get_T_electron(); + double dx = domain->xprd/nxnodes; + double dy = domain->yprd/nynodes; + double dz = domain->zprd/nynodes; + // resize daparam + daparam.resize(nlocal); + // loop over atoms to assign aparam + for (int ii = 0; ii < nlocal; ii++) { + if (mask[ii] & ttm_fix->groupbit) { + double xscale = (x[ii][0] - domain->boxlo[0])/domain->xprd; + double yscale = (x[ii][1] - domain->boxlo[1])/domain->yprd; + double zscale = (x[ii][2] - domain->boxlo[2])/domain->zprd; + int ixnode = static_cast(xscale*nxnodes); + int iynode = static_cast(yscale*nynodes); + int iznode = static_cast(zscale*nznodes); + while (ixnode > nxnodes-1) ixnode -= nxnodes; + while (iynode > nynodes-1) iynode -= nynodes; + while (iznode > nznodes-1) iznode -= nznodes; + while (ixnode < 0) ixnode += nxnodes; + while (iynode < 0) iynode += nynodes; + while (iznode < 0) iznode += nznodes; + daparam[ii] = T_electron[ixnode][iynode][iznode]; + } + } +} +#endif + PairNNP::PairNNP(LAMMPS *lmp) : Pair(lmp) @@ -113,6 +193,7 @@ PairNNP::PairNNP(LAMMPS *lmp) out_rel = 0; eps = 0.; scale = NULL; + do_ttm = false; // set comm size needed by this Pair comm_reverse = 1; @@ -176,6 +257,11 @@ void PairNNP::compute(int eflag, int vflag) vector dvirial (9, 0); vector dcoord (nall * 3, 0.); vector dbox (9, 0) ; +#ifdef HIGH_PREC + vector daparam; +#else + vector daparam; +#endif // get box dbox[0] = domain->h[0]; // xx @@ 
-191,7 +277,17 @@ void PairNNP::compute(int eflag, int vflag) dcoord[ii*3+dd] = x[ii][dd] - domain->boxlo[dd]; } } - + + // uniform aparam + if (aparam.size() > 0){ + make_uniform_aparam(daparam, aparam, nlocal); + } + else if (do_ttm) { +#ifdef USE_TTM + make_ttm_aparam(daparam); +#endif + } + // compute bool single_model = (numb_models == 1); bool multi_models_no_mod_devi = (numb_models > 1 && (out_freq == 0 || update->ntimestep % out_freq != 0)); @@ -201,7 +297,7 @@ void PairNNP::compute(int eflag, int vflag) if (single_model || multi_models_no_mod_devi) { if ( ! (eflag_atom || vflag_atom) ) { #ifdef HIGH_PREC - nnp_inter.compute (dener, dforce, dvirial, dcoord, dtype, dbox, nghost, lmp_list, fparam); + nnp_inter.compute (dener, dforce, dvirial, dcoord, dtype, dbox, nghost, lmp_list, fparam, daparam); #else vector dcoord_(dcoord.size()); vector dbox_(dbox.size()); @@ -210,7 +306,7 @@ void PairNNP::compute(int eflag, int vflag) vector dforce_(dforce.size(), 0); vector dvirial_(dvirial.size(), 0); double dener_ = 0; - nnp_inter.compute (dener_, dforce_, dvirial_, dcoord_, dtype, dbox_, nghost, lmp_list, fparam); + nnp_inter.compute (dener_, dforce_, dvirial_, dcoord_, dtype, dbox_, nghost, lmp_list, fparam, daparam); for (unsigned dd = 0; dd < dforce.size(); ++dd) dforce[dd] = dforce_[dd]; for (unsigned dd = 0; dd < dvirial.size(); ++dd) dvirial[dd] = dvirial_[dd]; dener = dener_; @@ -221,7 +317,7 @@ void PairNNP::compute(int eflag, int vflag) vector deatom (nall * 1, 0); vector dvatom (nall * 9, 0); #ifdef HIGH_PREC - nnp_inter.compute (dener, dforce, dvirial, deatom, dvatom, dcoord, dtype, dbox, nghost, lmp_list, fparam); + nnp_inter.compute (dener, dforce, dvirial, deatom, dvatom, dcoord, dtype, dbox, nghost, lmp_list, fparam, daparam); #else vector dcoord_(dcoord.size()); vector dbox_(dbox.size()); @@ -232,7 +328,7 @@ void PairNNP::compute(int eflag, int vflag) vector deatom_(dforce.size(), 0); vector dvatom_(dforce.size(), 0); double dener_ = 0; - nnp_inter.compute (dener_, dforce_, dvirial_, deatom_, dvatom_, dcoord_, dtype, dbox_, nghost, lmp_list, fparam); + nnp_inter.compute (dener_, dforce_, dvirial_, deatom_, dvatom_, dcoord_, dtype, dbox_, nghost, lmp_list, fparam, daparam); for (unsigned dd = 0; dd < dforce.size(); ++dd) dforce[dd] = dforce_[dd]; for (unsigned dd = 0; dd < dvirial.size(); ++dd) dvirial[dd] = dvirial_[dd]; for (unsigned dd = 0; dd < deatom.size(); ++dd) deatom[dd] = deatom_[dd]; @@ -262,7 +358,7 @@ void PairNNP::compute(int eflag, int vflag) vector> all_virial; vector> all_atom_energy; vector> all_atom_virial; - nnp_inter_model_devi.compute(all_energy, all_force, all_virial, all_atom_energy, all_atom_virial, dcoord, dtype, dbox, nghost, lmp_list, fparam); + nnp_inter_model_devi.compute(all_energy, all_force, all_virial, all_atom_energy, all_atom_virial, dcoord, dtype, dbox, nghost, lmp_list, fparam, daparam); // nnp_inter_model_devi.compute_avg (dener, all_energy); // nnp_inter_model_devi.compute_avg (dforce, all_force); // nnp_inter_model_devi.compute_avg (dvirial, all_virial); @@ -288,7 +384,7 @@ void PairNNP::compute(int eflag, int vflag) vector> all_virial_; vector> all_atom_energy_; vector> all_atom_virial_; - nnp_inter_model_devi.compute(all_energy_, all_force_, all_virial_, all_atom_energy_, all_atom_virial_, dcoord_, dtype, dbox_, nghost, lmp_list, fparam); + nnp_inter_model_devi.compute(all_energy_, all_force_, all_virial_, all_atom_energy_, all_atom_virial_, dcoord_, dtype, dbox_, nghost, lmp_list, fparam, daparam); // 
nnp_inter_model_devi.compute_avg (dener_, all_energy_); // nnp_inter_model_devi.compute_avg (dforce_, all_force_); // nnp_inter_model_devi.compute_avg (dvirial_, all_virial_); @@ -496,6 +592,8 @@ is_key (const string& input) keys.push_back("out_freq"); keys.push_back("out_file"); keys.push_back("fparam"); + keys.push_back("aparam"); + keys.push_back("ttm"); keys.push_back("atomic"); keys.push_back("relative"); @@ -529,6 +627,7 @@ void PairNNP::settings(int narg, char **arg) cutoff = nnp_inter.cutoff (); numb_types = nnp_inter.numb_types(); dim_fparam = nnp_inter.dim_fparam(); + dim_aparam = nnp_inter.dim_aparam(); } else { nnp_inter.init (arg[0], get_node_rank()); @@ -536,9 +635,11 @@ void PairNNP::settings(int narg, char **arg) cutoff = nnp_inter_model_devi.cutoff(); numb_types = nnp_inter_model_devi.numb_types(); dim_fparam = nnp_inter_model_devi.dim_fparam(); + dim_aparam = nnp_inter_model_devi.dim_aparam(); assert(cutoff == nnp_inter.cutoff()); assert(numb_types == nnp_inter.numb_types()); assert(dim_fparam == nnp_inter.dim_fparam()); + assert(dim_aparam == nnp_inter.dim_aparam()); } out_freq = 100; @@ -547,6 +648,7 @@ void PairNNP::settings(int narg, char **arg) out_rel = 0; eps = 0.; fparam.clear(); + aparam.clear(); while (iarg < narg) { if (! is_key(arg[iarg])) { error->all(FLERR,"Illegal pair_style command\nwrong number of parameters\n"); @@ -572,21 +674,49 @@ void PairNNP::settings(int narg, char **arg) } iarg += 1 + dim_fparam ; } - else if (string(arg[iarg]) == string("atomic")) { - out_each = 1; - iarg += 1; + else if (string(arg[iarg]) == string("aparam")) { + for (int ii = 0; ii < dim_aparam; ++ii){ + if (iarg+1+ii >= narg || is_key(arg[iarg+1+ii])) { + char tmp[1024]; + sprintf(tmp, "Illegal aparam, the dimension should be %d", dim_aparam); + error->all(FLERR, tmp); + } + aparam.push_back(atof(arg[iarg+1+ii])); + } + iarg += 1 + dim_aparam ; + } + else if (string(arg[iarg]) == string("ttm")) { +#ifdef USE_TTM + for (int ii = 0; ii < 1; ++ii){ + if (iarg+1+ii >= narg || is_key(arg[iarg+1+ii])) { + error->all(FLERR, "invalid ttm key: should be ttm ttm_fix_id(str)"); } - else if (string(arg[iarg]) == string("relative")) { - out_rel = 1; + } + do_ttm = true; + ttm_fix_id = arg[iarg+1]; + iarg += 1 + 1; +#else + error->all(FLERR, "The deepmd-kit was compiled without support for TTM, please rebuild it with -DUSE_TTM"); +#endif + } + else if (string(arg[iarg]) == string("atomic")) { + out_each = 1; + iarg += 1; + } + else if (string(arg[iarg]) == string("relative")) { + out_rel = 1; #ifdef HIGH_PREC - eps = atof(arg[iarg+1]); + eps = atof(arg[iarg+1]); #else - eps = strtof(arg[iarg+1]); + eps = strtof(arg[iarg+1], NULL); #endif - iarg += 2; - } + iarg += 2; + } + } + if (out_freq < 0) error->all(FLERR,"Illegal out_freq, should be >= 0"); + if (do_ttm && aparam.size() > 0) { + error->all(FLERR,"aparam and ttm should NOT be set simultaneously"); } - if (out_freq < 0) error->all(FLERR,"Illegal out_freq, should be >= 0"); if (comm->me == 0){ if (numb_models > 1 && out_freq > 0){ @@ -623,6 +753,13 @@ void PairNNP::settings(int narg, char **arg) } cout << endl; } + if (aparam.size() > 0) { + cout << pre << "using aparam(s): " ; + for (int ii = 0; ii < aparam.size(); ++ii){ + cout << aparam[ii] << " " ; + } + cout << endl; + } } comm_reverse = numb_models * 3; diff --git a/source/lmp/pair_nnp.h.in b/source/lmp/pair_nnp.h.in index 5c43abc062..fbc71a4e6f 100644 --- a/source/lmp/pair_nnp.h.in +++ b/source/lmp/pair_nnp.h.in @@ -79,15 +79,27 @@ private: int out_freq; string out_file; int 
dim_fparam; + int dim_aparam; int out_each; int out_rel; #ifdef HIGH_PREC vector fparam; + vector aparam; double eps; #else vector fparam; + vector aparam; float eps; #endif + void make_ttm_aparam( +#ifdef HIGH_PREC + vector & dparam +#else + vector & dparam +#endif + ); + bool do_ttm; + string ttm_fix_id; }; } diff --git a/source/scripts/freeze.py b/source/scripts/freeze.py index eac57821c7..62a7f4559f 100755 --- a/source/scripts/freeze.py +++ b/source/scripts/freeze.py @@ -37,9 +37,11 @@ def _make_node_names(model_type = None) : if model_type == 'ener': - nodes = "o_energy,o_force,o_virial,o_atom_energy,o_atom_virial,descrpt_attr/rcut,descrpt_attr/ntypes,fitting_attr/dfparam,model_attr/tmap,model_attr/model_type" + nodes = "o_energy,o_force,o_virial,o_atom_energy,o_atom_virial,descrpt_attr/rcut,descrpt_attr/ntypes,fitting_attr/dfparam,fitting_attr/daparam,model_attr/tmap,model_attr/model_type" elif model_type == 'wfc': nodes = "o_wfc,descrpt_attr/rcut,descrpt_attr/ntypes,model_attr/tmap,model_attr/sel_type,model_attr/model_type" + elif model_type == 'dipole': + nodes = "o_dipole,descrpt_attr/rcut,descrpt_attr/ntypes,model_attr/tmap,model_attr/sel_type,model_attr/model_type" elif model_type == 'polar': nodes = "o_polar,descrpt_attr/rcut,descrpt_attr/ntypes,model_attr/tmap,model_attr/sel_type,model_attr/model_type" else: diff --git a/source/tests/common.py b/source/tests/common.py index 1a7239f6c6..812a238c4a 100644 --- a/source/tests/common.py +++ b/source/tests/common.py @@ -30,6 +30,7 @@ def gen_data() : sys.data['forces'] = np.zeros([nframes,natoms,3]) sys.to_deepmd_npy('system', prec=np.float64) np.save('system/set.000/fparam.npy', tmpdata.fparam) + np.save('system/set.000/aparam.npy', tmpdata.aparam.reshape([nframes, natoms, 2])) class Data(): def __init__ (self, @@ -41,6 +42,7 @@ def __init__ (self, np.random.seed(seed) self.coord += rand_pert * np.random.random(self.coord.shape) self.fparam = np.array([[0.1, 0.2]]) + self.aparam = np.tile(self.fparam, [1, 6]) self.atype = np.array([0, 1, 1, 0, 1, 1], dtype = int) self.cell = 20 * np.eye(3) self.nframes = 1 diff --git a/source/tests/polar_se_a.json b/source/tests/polar_se_a.json index 33795d0972..1c4694636e 100644 --- a/source/tests/polar_se_a.json +++ b/source/tests/polar_se_a.json @@ -18,6 +18,7 @@ "fitting_net": { "type": "polar", "pol_type": [0], + "fit_diag": false, "neuron": [100, 100, 100], "resnet_dt": true, "seed": 1, diff --git a/source/tests/test_fitting_stat.py b/source/tests/test_fitting_stat.py new file mode 100644 index 0000000000..aed3a6992a --- /dev/null +++ b/source/tests/test_fitting_stat.py @@ -0,0 +1,78 @@ +import os,sys,json +import numpy as np +import unittest + +from collections import defaultdict +from deepmd.DescrptSeA import DescrptSeA +from deepmd.Fitting import EnerFitting + +input_json = 'water_se_a_afparam.json' + +def _make_fake_data(sys_natoms, sys_nframes, avgs, stds): + all_stat = defaultdict(list) + nsys = len(sys_natoms) + ndof = len(avgs) + for ii in range(nsys): + tmp_data_f = [] + tmp_data_a = [] + for jj in range(ndof) : + tmp_data_f.append(np.random.normal(loc = avgs[jj], + scale = stds[jj], + size = (sys_nframes[ii],1))) + tmp_data_a.append(np.random.normal(loc = avgs[jj], + scale = stds[jj], + size = (sys_nframes[ii], sys_natoms[ii]))) + tmp_data_f = np.transpose(tmp_data_f, (1,2,0)) + tmp_data_a = np.transpose(tmp_data_a, (1,2,0)) + all_stat['fparam'].append(tmp_data_f) + all_stat['aparam'].append(tmp_data_a) + return all_stat + +def _brute_fparam(data, ndim): + adata = data['fparam'] + 
all_data = [] + for ii in adata: + tmp = np.reshape(ii, [-1, ndim]) + if len(all_data) == 0: + all_data = np.array(tmp) + else: + all_data = np.concatenate((all_data, tmp), axis = 0) + avg = np.average(all_data, axis = 0) + std = np.std(all_data, axis = 0) + return avg, std + +def _brute_aparam(data, ndim): + adata = data['aparam'] + all_data = [] + for ii in adata: + tmp = np.reshape(ii, [-1, ndim]) + if len(all_data) == 0: + all_data = np.array(tmp) + else: + all_data = np.concatenate((all_data, tmp), axis = 0) + avg = np.average(all_data, axis = 0) + std = np.std(all_data, axis = 0) + return avg, std + + +class TestEnerFittingStat (unittest.TestCase) : + def test (self) : + with open(input_json) as fp: + jdata = json.load(fp) + jdata = jdata['model'] + descrpt = DescrptSeA(jdata['descriptor']) + fitting = EnerFitting(jdata['fitting_net'], descrpt) + avgs = [0, 10] + stds = [2, 0.4] + sys_natoms = [10, 100] + sys_nframes = [5, 2] + all_data = _make_fake_data(sys_natoms, sys_nframes, avgs, stds) + frefa, frefs = _brute_fparam(all_data, len(avgs)) + arefa, arefs = _brute_aparam(all_data, len(avgs)) + fitting.compute_dstats(all_data, protection = 1e-2) + # print(frefa, frefs) + for ii in range(len(avgs)): + self.assertAlmostEqual(frefa[ii], fitting.fparam_avg[ii]) + self.assertAlmostEqual(frefs[ii], fitting.fparam_std[ii]) + self.assertAlmostEqual(arefa[ii], fitting.aparam_avg[ii]) + self.assertAlmostEqual(arefs[ii], fitting.aparam_std[ii]) diff --git a/source/tests/test_model_loc_frame.py b/source/tests/test_model_loc_frame.py index 2fdc5fad76..5981ee6497 100644 --- a/source/tests/test_model_loc_frame.py +++ b/source/tests/test_model_loc_frame.py @@ -42,7 +42,14 @@ def test_model(self): fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) model = Model(jdata['model'], descrpt, fitting) - model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + input_data = {'coord' : [test_data['coord']], + 'box': [test_data['box']], + 'type': [test_data['type']], + 'natoms_vec' : [test_data['natoms_vec']], + 'default_mesh' : [test_data['default_mesh']] + } + model._compute_dstats(input_data) model.bias_atom_e = data.compute_energy_shift() t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c') diff --git a/source/tests/test_model_se_a.py b/source/tests/test_model_se_a.py index 37d9120526..d5be148358 100644 --- a/source/tests/test_model_se_a.py +++ b/source/tests/test_model_se_a.py @@ -42,7 +42,14 @@ def test_model(self): fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) model = Model(jdata['model'], descrpt, fitting) - model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + input_data = {'coord' : [test_data['coord']], + 'box': [test_data['box']], + 'type': [test_data['type']], + 'natoms_vec' : [test_data['natoms_vec']], + 'default_mesh' : [test_data['default_mesh']] + } + model._compute_dstats(input_data) model.bias_atom_e = data.compute_energy_shift() t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c') diff --git a/source/tests/test_model_se_a_aparam.py b/source/tests/test_model_se_a_aparam.py new file mode 100644 
index 0000000000..293145e377 --- /dev/null +++ b/source/tests/test_model_se_a_aparam.py @@ -0,0 +1,120 @@ +import dpdata,os,sys,json,unittest +import numpy as np +from deepmd.env import tf +from common import Data,gen_data + +from deepmd.RunOptions import RunOptions +from deepmd.DataSystem import DataSystem +from deepmd.DescrptSeA import DescrptSeA +from deepmd.Fitting import EnerFitting +from deepmd.Model import Model +from deepmd.common import j_must_have, j_must_have_d, j_have + +global_ener_float_precision = tf.float64 +global_tf_float_precision = tf.float64 +global_np_float_precision = np.float64 + +class TestModel(unittest.TestCase): + def setUp(self) : + gen_data() + + def test_model(self): + jfile = 'water_se_a_aparam.json' + with open(jfile) as fp: + jdata = json.load (fp) + run_opt = RunOptions(None) + systems = j_must_have(jdata, 'systems') + set_pfx = j_must_have(jdata, 'set_prefix') + batch_size = j_must_have(jdata, 'batch_size') + test_size = j_must_have(jdata, 'numb_test') + batch_size = 1 + test_size = 1 + stop_batch = j_must_have(jdata, 'stop_batch') + rcut = j_must_have (jdata['model']['descriptor'], 'rcut') + + data = DataSystem(systems, set_pfx, batch_size, test_size, rcut, run_opt = None) + + test_data = data.get_test () + # manually set aparam + test_data['aparam'] = np.load('system/set.000/aparam.npy') + numb_test = 1 + + descrpt = DescrptSeA(jdata['model']['descriptor']) + fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) + model = Model(jdata['model'], descrpt, fitting) + + # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + input_data = {'coord' : [test_data['coord']], + 'box': [test_data['box']], + 'type': [test_data['type']], + 'natoms_vec' : [test_data['natoms_vec']], + 'default_mesh' : [test_data['default_mesh']], + 'aparam': [test_data['aparam']], + } + model._compute_dstats(input_data) + model.bias_atom_e = data.compute_energy_shift() + + t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c') + t_energy = tf.placeholder(global_ener_float_precision, [None], name='t_energy') + t_force = tf.placeholder(global_tf_float_precision, [None], name='t_force') + t_virial = tf.placeholder(global_tf_float_precision, [None], name='t_virial') + t_atom_ener = tf.placeholder(global_tf_float_precision, [None], name='t_atom_ener') + t_coord = tf.placeholder(global_tf_float_precision, [None], name='i_coord') + t_type = tf.placeholder(tf.int32, [None], name='i_type') + t_natoms = tf.placeholder(tf.int32, [model.ntypes+2], name='i_natoms') + t_box = tf.placeholder(global_tf_float_precision, [None, 9], name='i_box') + t_mesh = tf.placeholder(tf.int32, [None], name='i_mesh') + t_aparam = tf.placeholder(global_tf_float_precision, [None], name='i_aparam') + is_training = tf.placeholder(tf.bool) + input_dict = {} + input_dict['aparam'] = t_aparam + + model_pred\ + = model.build (t_coord, + t_type, + t_natoms, + t_box, + t_mesh, + input_dict, + suffix = "se_a_aparam", + reuse = False) + energy = model_pred['energy'] + force = model_pred['force'] + virial = model_pred['virial'] + atom_ener = model_pred['atom_ener'] + + feed_dict_test = {t_prop_c: test_data['prop_c'], + t_energy: test_data['energy'] [:numb_test], + t_force: np.reshape(test_data['force'] [:numb_test, :], [-1]), + t_virial: np.reshape(test_data['virial'] [:numb_test, :], [-1]), + t_atom_ener: np.reshape(test_data['atom_ener'][:numb_test, :], [-1]), + t_coord: np.reshape(test_data['coord'] [:numb_test, :], [-1]), + 
t_box: test_data['box'] [:numb_test, :], + t_type: np.reshape(test_data['type'] [:numb_test, :], [-1]), + t_natoms: test_data['natoms_vec'], + t_mesh: test_data['default_mesh'], + t_aparam: np.reshape(test_data['aparam'] [:numb_test, :], [-1]), + is_training: False} + + sess = tf.Session() + sess.run(tf.global_variables_initializer()) + [e, f, v] = sess.run([energy, force, virial], + feed_dict = feed_dict_test) + + e = e.reshape([-1]) + f = f.reshape([-1]) + v = v.reshape([-1]) + refe = [61.35473702079649] + reff = [7.789591210641927388e-02,9.411176646369459609e-02,3.785806413688173194e-03,1.430830954178063386e-01,1.146964190520970150e-01,-1.320340288927138173e-02,-7.308720494747594776e-02,6.508269338140809657e-02,5.398739145542804643e-04,5.863268336973800898e-02,-1.603409523950408699e-01,-5.083084610994957619e-03,-2.551569799443983988e-01,3.087934885732580501e-02,1.508590526622844222e-02,4.863249399791078065e-02,-1.444292753594846324e-01,-1.125098094204559241e-03] + refv = [-6.069498397488943819e-01,1.101778888191114192e-01,1.981907430646132409e-02,1.101778888191114608e-01,-3.315612988100872793e-01,-5.999739184898976799e-03,1.981907430646132756e-02,-5.999739184898974197e-03,-1.198656608172396325e-03] + refe = np.reshape(refe, [-1]) + reff = np.reshape(reff, [-1]) + refv = np.reshape(refv, [-1]) + + places = 10 + for ii in range(e.size) : + self.assertAlmostEqual(e[ii], refe[ii], places = places) + for ii in range(f.size) : + self.assertAlmostEqual(f[ii], reff[ii], places = places) + for ii in range(v.size) : + self.assertAlmostEqual(v[ii], refv[ii], places = places) diff --git a/source/tests/test_model_se_a_fparam.py b/source/tests/test_model_se_a_fparam.py index 76ce3b9296..4f85af0579 100644 --- a/source/tests/test_model_se_a_fparam.py +++ b/source/tests/test_model_se_a_fparam.py @@ -41,7 +41,15 @@ def test_model(self): fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) model = Model(jdata['model'], descrpt, fitting) - model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + input_data = {'coord' : [test_data['coord']], + 'box': [test_data['box']], + 'type': [test_data['type']], + 'natoms_vec' : [test_data['natoms_vec']], + 'default_mesh' : [test_data['default_mesh']], + 'fparam': [test_data['fparam']], + } + model._compute_dstats(input_data) model.bias_atom_e = data.compute_energy_shift() t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c') @@ -94,9 +102,9 @@ def test_model(self): e = e.reshape([-1]) f = f.reshape([-1]) v = v.reshape([-1]) - refe = [6.135136929183754972e+01] - reff = [7.761477777656561328e-02,9.383013575207051205e-02,3.776776376267230399e-03,1.428268971463224069e-01,1.143858253900619654e-01,-1.318441687719179231e-02,-7.271897092708884403e-02,6.494907553857684479e-02,5.355599592111062821e-04,5.840910251709752199e-02,-1.599042555763417750e-01,-5.067165555590445389e-03,-2.546246315216804113e-01,3.073296814647456451e-02,1.505994759166155023e-02,4.849282500878367153e-02,-1.439937492508420736e-01,-1.120701494357654411e-03] - refv = [-6.054303146013112480e-01,1.097859194719944115e-01,1.977605183964963390e-02,1.097859194719943976e-01,-3.306167096812382966e-01,-5.978855662865613894e-03,1.977605183964964083e-02,-5.978855662865616497e-03,-1.196331922996723236e-03] + refe = [61.35473702079649] + reff = 
[7.789591210641927388e-02,9.411176646369459609e-02,3.785806413688173194e-03,1.430830954178063386e-01,1.146964190520970150e-01,-1.320340288927138173e-02,-7.308720494747594776e-02,6.508269338140809657e-02,5.398739145542804643e-04,5.863268336973800898e-02,-1.603409523950408699e-01,-5.083084610994957619e-03,-2.551569799443983988e-01,3.087934885732580501e-02,1.508590526622844222e-02,4.863249399791078065e-02,-1.444292753594846324e-01,-1.125098094204559241e-03] + refv = [-6.069498397488943819e-01,1.101778888191114192e-01,1.981907430646132409e-02,1.101778888191114608e-01,-3.315612988100872793e-01,-5.999739184898976799e-03,1.981907430646132756e-02,-5.999739184898974197e-03,-1.198656608172396325e-03] refe = np.reshape(refe, [-1]) reff = np.reshape(reff, [-1]) refv = np.reshape(refv, [-1]) diff --git a/source/tests/test_model_se_a_srtab.py b/source/tests/test_model_se_a_srtab.py index 91021c2c40..82d1492067 100644 --- a/source/tests/test_model_se_a_srtab.py +++ b/source/tests/test_model_se_a_srtab.py @@ -51,7 +51,14 @@ def test_model(self): fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) model = Model(jdata['model'], descrpt, fitting) - model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + input_data = {'coord' : [test_data['coord']], + 'box': [test_data['box']], + 'type': [test_data['type']], + 'natoms_vec' : [test_data['natoms_vec']], + 'default_mesh' : [test_data['default_mesh']] + } + model._compute_dstats(input_data) model.bias_atom_e = data.compute_energy_shift() t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c') diff --git a/source/tests/test_model_se_r.py b/source/tests/test_model_se_r.py index 36e9772c92..965e89a1bd 100644 --- a/source/tests/test_model_se_r.py +++ b/source/tests/test_model_se_r.py @@ -41,7 +41,14 @@ def test_model(self): fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) model = Model(jdata['model'], descrpt, fitting) - model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + input_data = {'coord' : [test_data['coord']], + 'box': [test_data['box']], + 'type': [test_data['type']], + 'natoms_vec' : [test_data['natoms_vec']], + 'default_mesh' : [test_data['default_mesh']] + } + model._compute_dstats(input_data) model.bias_atom_e = data.compute_energy_shift() t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c') diff --git a/source/tests/test_polar_se_a.py b/source/tests/test_polar_se_a.py index 3a0f673d51..0506c84ff2 100644 --- a/source/tests/test_polar_se_a.py +++ b/source/tests/test_polar_se_a.py @@ -41,7 +41,15 @@ def test_model(self): fitting = PolarFittingSeA(jdata['model']['fitting_net'], descrpt) model = PolarModel(jdata['model'], descrpt, fitting) - model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + input_data = {'coord' : [test_data['coord']], + 'box': [test_data['box']], + 'type': [test_data['type']], + 'natoms_vec' : 
[test_data['natoms_vec']], + 'default_mesh' : [test_data['default_mesh']], + 'fparam': [test_data['fparam']], + } + model._compute_dstats(input_data) t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c') t_energy = tf.placeholder(global_ener_float_precision, [None], name='t_energy') diff --git a/source/tests/test_wfc.py b/source/tests/test_wfc.py new file mode 100644 index 0000000000..fc0d850b01 --- /dev/null +++ b/source/tests/test_wfc.py @@ -0,0 +1,97 @@ +import dpdata,os,sys,json,unittest +import numpy as np +from deepmd.env import tf +from common import Data,gen_data + +from deepmd.RunOptions import RunOptions +from deepmd.DataSystem import DataSystem +from deepmd.DescrptLocFrame import DescrptLocFrame +from deepmd.Fitting import WFCFitting +from deepmd.Model import WFCModel +from deepmd.common import j_must_have, j_must_have_d, j_have + +global_ener_float_precision = tf.float64 +global_tf_float_precision = tf.float64 +global_np_float_precision = np.float64 + +class TestModel(unittest.TestCase): + def setUp(self) : + gen_data() + + def test_model(self): + jfile = 'wfc.json' + with open(jfile) as fp: + jdata = json.load (fp) + run_opt = RunOptions(None) + systems = j_must_have(jdata, 'systems') + set_pfx = j_must_have(jdata, 'set_prefix') + batch_size = j_must_have(jdata, 'batch_size') + test_size = j_must_have(jdata, 'numb_test') + batch_size = 1 + test_size = 1 + stop_batch = j_must_have(jdata, 'stop_batch') + rcut = j_must_have (jdata['model']['descriptor'], 'rcut') + + data = DataSystem(systems, set_pfx, batch_size, test_size, rcut, run_opt = None) + + test_data = data.get_test () + numb_test = 1 + + descrpt = DescrptLocFrame(jdata['model']['descriptor']) + fitting = WFCFitting(jdata['model']['fitting_net'], descrpt) + model = WFCModel(jdata['model'], descrpt, fitting) + + input_data = {'coord' : [test_data['coord']], + 'box': [test_data['box']], + 'type': [test_data['type']], + 'natoms_vec' : [test_data['natoms_vec']], + 'default_mesh' : [test_data['default_mesh']], + 'fparam': [test_data['fparam']], + } + model._compute_dstats(input_data) + + t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c') + t_energy = tf.placeholder(global_ener_float_precision, [None], name='t_energy') + t_force = tf.placeholder(global_tf_float_precision, [None], name='t_force') + t_virial = tf.placeholder(global_tf_float_precision, [None], name='t_virial') + t_atom_ener = tf.placeholder(global_tf_float_precision, [None], name='t_atom_ener') + t_coord = tf.placeholder(global_tf_float_precision, [None], name='i_coord') + t_type = tf.placeholder(tf.int32, [None], name='i_type') + t_natoms = tf.placeholder(tf.int32, [model.ntypes+2], name='i_natoms') + t_box = tf.placeholder(global_tf_float_precision, [None, 9], name='i_box') + t_mesh = tf.placeholder(tf.int32, [None], name='i_mesh') + is_training = tf.placeholder(tf.bool) + t_fparam = None + + model_pred \ + = model.build (t_coord, + t_type, + t_natoms, + t_box, + t_mesh, + t_fparam, + suffix = "wfc", + reuse = False) + wfc = model_pred['wfc'] + + feed_dict_test = {t_prop_c: test_data['prop_c'], + t_coord: np.reshape(test_data['coord'] [:numb_test, :], [-1]), + t_box: test_data['box'] [:numb_test, :], + t_type: np.reshape(test_data['type'] [:numb_test, :], [-1]), + t_natoms: test_data['natoms_vec'], + t_mesh: test_data['default_mesh'], + is_training: False} + + sess = tf.Session() + sess.run(tf.global_variables_initializer()) + [p] = sess.run([wfc], feed_dict = feed_dict_test) + + p = p.reshape([-1]) + refp = 
[-9.105016838228578990e-01,7.196284362034099935e-01,-9.548516928185298014e-02,2.764615027095288724e+00,2.661319598995644520e-01,7.579512949131941846e-02,-2.107409067376114997e+00,-1.299080016614967414e-01,-5.962778584850070285e-01,2.913899917663253514e-01,-1.226917174638697094e+00,1.829523069930876655e+00,1.015704024959750873e+00,-1.792333611099589386e-01,5.032898080485321834e-01,1.808561721292949453e-01,2.468863482075112081e+00,-2.566442546384765100e-01,-1.467453783795173994e-01,-1.822963931552128658e+00,5.843600156865462747e-01,-1.493875280832117403e+00,1.693322352814763398e-01,-1.877325443995481624e+00] + + places = 6 + for ii in range(p.size) : + self.assertAlmostEqual(p[ii], refp[ii], places = places) + + + diff --git a/source/tests/water_se_a_afparam.json b/source/tests/water_se_a_afparam.json new file mode 100644 index 0000000000..ca2187b988 --- /dev/null +++ b/source/tests/water_se_a_afparam.json @@ -0,0 +1,57 @@ +{ + "_comment": " model parameters", + "model" : { + "descriptor" :{ + "type": "se_a", + "sel": [46, 92], + "rcut_smth": 5.80, + "rcut": 6.00, + "neuron": [25, 50, 100], + "resnet_dt": false, + "axis_neuron": 16, + "seed": 1 + }, + "fitting_net" : { + "neuron": [240, 240, 240], + "resnet_dt": true, + "numb_fparam": 2, + "numb_aparam": 2, + "seed": 1 + } + }, + + + "_comment": " traing controls", + "systems": ["system"], + "set_prefix": "set", + "stop_batch": 1000000, + "batch_size": 1, + "start_lr": 0.005, + "decay_steps": 5000, + "decay_rate": 0.95, + + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0, + + "seed": 1, + + "_comment": " display and restart", + "_comment": " frequencies counted in batch", + "disp_file": "lcurve.out", + "disp_freq": 100, + "numb_test": 1, + "save_freq": 1000, + "save_ckpt": "model.ckpt", + "load_ckpt": "model.ckpt", + "disp_training": true, + "time_training": true, + "profiling": false, + "profiling_file": "timeline.json", + + "_comment": "that's all" +} + diff --git a/source/tests/water_se_a_aparam.json b/source/tests/water_se_a_aparam.json new file mode 100644 index 0000000000..daaee54cb2 --- /dev/null +++ b/source/tests/water_se_a_aparam.json @@ -0,0 +1,56 @@ +{ + "_comment": " model parameters", + "model" : { + "descriptor" :{ + "type": "se_a", + "sel": [46, 92], + "rcut_smth": 5.80, + "rcut": 6.00, + "neuron": [25, 50, 100], + "resnet_dt": false, + "axis_neuron": 16, + "seed": 1 + }, + "fitting_net" : { + "neuron": [240, 240, 240], + "resnet_dt": true, + "numb_aparam": 2, + "seed": 1 + } + }, + + + "_comment": " traing controls", + "systems": ["system"], + "set_prefix": "set", + "stop_batch": 1000000, + "batch_size": 1, + "start_lr": 0.005, + "decay_steps": 5000, + "decay_rate": 0.95, + + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0, + + "seed": 1, + + "_comment": " display and restart", + "_comment": " frequencies counted in batch", + "disp_file": "lcurve.out", + "disp_freq": 100, + "numb_test": 1, + "save_freq": 1000, + "save_ckpt": "model.ckpt", + "load_ckpt": "model.ckpt", + "disp_training": true, + "time_training": true, + "profiling": false, + "profiling_file": "timeline.json", + + "_comment": "that's all" +} + diff --git a/source/tests/wfc.json b/source/tests/wfc.json new file mode 100644 index 0000000000..556ef2a992 --- /dev/null +++ b/source/tests/wfc.json @@ -0,0 +1,62 @@ +{ + "with_distrib": false, + "_comment": " model parameters", + "model":{ + "type": "polar", + 
"type_map": ["O", "H"], + "descriptor" :{ + "type": "loc_frame", + "sel_a": [16, 32], + "sel_r": [30, 60], + "rcut": 6.00, + "axis_rule": [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0], + "_comment": " default rule: []", + "_comment": " user defined rule: for each type provides two axes, ", + "_comment": " for each axis: (a_or_r, type, idx)", + "_comment": " if type < 0, exclude type -(type+1)", + "_comment": " for water (O:0, H:1) it can be", + "_comment": " [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0]", + "_comment": " that's all" + }, + "fitting_net": { + "type": "wfc", + "wfc_numb": 4, + "sel_type": [0], + "neuron": [100, 100, 100], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "_comment": " that's all" + }, + + "learning_rate" :{ + "type": "exp", + "start_lr": 0.001, + "decay_steps": 5000, + "decay_rate": 0.95, + "_comment": "that's all" + }, + + "_comment": " traing controls", + "systems": ["system"], + "set_prefix": "set", + "stop_batch": 1000000, + "batch_size": [1], + + "seed": 1, + + "_comment": " display and restart", + "_comment": " frequencies counted in batch", + "disp_file": "lcurve.out", + "disp_freq": 100, + "numb_test": 10, + "save_freq": 1000, + "save_ckpt": "model.ckpt", + "load_ckpt": "model.ckpt", + "disp_training":true, + "time_training":true, + + "_comment": "that's all" +} + diff --git a/source/train/Data.py b/source/train/Data.py index 18f8e280ec..399a13712b 100644 --- a/source/train/Data.py +++ b/source/train/Data.py @@ -359,6 +359,14 @@ def __init__ (self, self.has_fparam = 0 else : self.has_fparam = -1 + # check aparam + has_aparam = [ os.path.isfile(os.path.join(ii, 'aparam.npy')) for ii in self.dirs ] + if any(has_aparam) and (not all(has_aparam)) : + raise RuntimeError("system %s: if any set has frame parameter, then all sets should have frame parameter" % sys_path) + if all(has_aparam) : + self.has_aparam = 0 + else : + self.has_aparam = -1 # energy norm self.eavg = self.stats_energy() # load sets @@ -463,6 +471,12 @@ def load_set(self, set_name, shuffle = True): self.has_fparam = data["fparam"].shape[1] else : assert self.has_fparam == data["fparam"].shape[1] + if self.has_aparam >= 0: + data["aparam"] = self.load_data(set_name, "aparam", [nframe, -1]) + if self.has_aparam == 0 : + self.has_aparam = data["aparam"].shape[1] // (ncoord//3) + else : + assert self.has_aparam == data["aparam"].shape[1] // (ncoord//3) data["prop_c"] = np.zeros(5) data["prop_c"][0], data["energy"], data["prop_c"][3], data["atom_ener"] \ = self.load_energy (set_name, nframe, ncoord // 3, "energy", "atom_ener") @@ -573,3 +587,6 @@ def get_ener (self) : def numb_fparam(self) : return self.has_fparam + def numb_aparam(self) : + return self.has_aparam + diff --git a/source/train/DeepDipole.py b/source/train/DeepDipole.py new file mode 100644 index 0000000000..3ef107b4a2 --- /dev/null +++ b/source/train/DeepDipole.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python3 + +import os,sys +import numpy as np +from deepmd.DeepEval import DeepTensor + +class DeepDipole (DeepTensor) : + def __init__(self, + model_file) : + DeepTensor.__init__(self, model_file, 'dipole', 3) + diff --git a/source/train/DeepPot.py b/source/train/DeepPot.py index ea55a1d0d6..dc62e56d8e 100644 --- a/source/train/DeepPot.py +++ b/source/train/DeepPot.py @@ -14,6 +14,7 @@ def __init__(self, self.t_ntypes = self.graph.get_tensor_by_name ('load/descrpt_attr/ntypes:0') self.t_rcut = self.graph.get_tensor_by_name ('load/descrpt_attr/rcut:0') self.t_dfparam= self.graph.get_tensor_by_name ('load/fitting_attr/dfparam:0') + 
self.t_daparam= self.graph.get_tensor_by_name ('load/fitting_attr/daparam:0') self.t_tmap = self.graph.get_tensor_by_name ('load/model_attr/tmap:0') # inputs self.t_coord = self.graph.get_tensor_by_name ('load/t_coord:0') @@ -28,14 +29,20 @@ def __init__(self, self.t_ae = self.graph.get_tensor_by_name ('load/o_atom_energy:0') self.t_av = self.graph.get_tensor_by_name ('load/o_atom_virial:0') self.t_fparam = None + self.t_aparam = None # check if the graph has fparam for op in self.graph.get_operations(): if op.name == 'load/t_fparam' : self.t_fparam = self.graph.get_tensor_by_name ('load/t_fparam:0') self.has_fparam = self.t_fparam is not None + # check if the graph has aparam + for op in self.graph.get_operations(): + if op.name == 'load/t_aparam' : + self.t_aparam = self.graph.get_tensor_by_name ('load/t_aparam:0') + self.has_aparam = self.t_aparam is not None # start a tf session associated to the graph self.sess = tf.Session (graph = self.graph) - [self.ntypes, self.rcut, self.dfparam, self.tmap] = self.sess.run([self.t_ntypes, self.t_rcut, self.t_dfparam, self.t_tmap]) + [self.ntypes, self.rcut, self.dfparam, self.daparam, self.tmap] = self.sess.run([self.t_ntypes, self.t_rcut, self.t_dfparam, self.t_daparam, self.t_tmap]) self.tmap = self.tmap.decode('UTF-8').split() @@ -48,6 +55,9 @@ def get_rcut(self) : def get_dim_fparam(self) : return self.dfparam + def get_dim_aparam(self) : + return self.daparam + def get_type_map(self): return self.tmap @@ -57,6 +67,7 @@ def eval(self, cells, atom_types, fparam = None, + aparam = None, atomic = False) : # standarize the shape of inputs coords = np.array(coords) @@ -65,6 +76,9 @@ def eval(self, if self.has_fparam : assert(fparam is not None) fparam = np.array(fparam) + if self.has_aparam : + assert(aparam is not None) + aparam = np.array(aparam) # reshape the inputs cells = np.reshape(cells, [-1, 9]) @@ -79,6 +93,16 @@ def eval(self, fparam = np.tile(fparam.reshape([-1]), [nframes, 1]) else : raise RuntimeError('got wrong size of frame param, should be either %d x %d or %d' % (nframes, fdim, fdim)) + if self.has_aparam : + fdim = self.get_dim_aparam() + if aparam.size == nframes * natoms * fdim: + aparam = np.reshape(aparam, [nframes, natoms * fdim]) + elif aparam.size == natoms * fdim : + aparam = np.tile(aparam.reshape([-1]), [nframes, 1]) + elif aparam.size == fdim : + aparam = np.tile(aparam.reshape([-1]), [nframes, natoms]) + else : + raise RuntimeError('got wrong size of frame param, should be either %d x %d x %d or %d x %d or %d' % (nframes, natoms, fdim, natoms, fdim, fdim)) # sort inputs coords, atom_types, imap = self.sort_input(coords, atom_types) @@ -109,6 +133,8 @@ def eval(self, feed_dict_test[self.t_box ] = np.reshape(cells [ii:ii+1, :], [-1]) if self.has_fparam: feed_dict_test[self.t_fparam] = np.reshape(fparam[ii:ii+1, :], [-1]) + if self.has_aparam: + feed_dict_test[self.t_aparam] = np.reshape(aparam[ii:ii+1, :], [-1]) v_out = self.sess.run (t_out, feed_dict = feed_dict_test) energy.append(v_out[0]) force .append(v_out[1]) diff --git a/source/train/DescrptLocFrame.py b/source/train/DescrptLocFrame.py index 9e2e54558e..ab00ac31e2 100644 --- a/source/train/DescrptLocFrame.py +++ b/source/train/DescrptLocFrame.py @@ -61,8 +61,7 @@ def compute_dstats (self, data_box, data_atype, natoms_vec, - mesh, - reuse = None) : + mesh) : all_davg = [] all_dstd = [] if True: @@ -71,7 +70,7 @@ def compute_dstats (self, sumv2 = [] for cc,bb,tt,nn,mm in zip(data_coord,data_box,data_atype,natoms_vec,mesh) : sysv,sysv2,sysn \ - = 
self._compute_dstats_sys_nonsmth(cc,bb,tt,nn,mm,reuse) + = self._compute_dstats_sys_nonsmth(cc,bb,tt,nn,mm) sumv.append(sysv) sumn.append(sysn) sumv2.append(sysv2) @@ -174,8 +173,7 @@ def _compute_dstats_sys_nonsmth (self, data_box, data_atype, natoms_vec, - mesh, - reuse = None) : + mesh) : avg_zero = np.zeros([self.ntypes,self.ndescrpt]).astype(global_np_float_precision) std_ones = np.ones ([self.ntypes,self.ndescrpt]).astype(global_np_float_precision) sub_graph = tf.Graph() diff --git a/source/train/DescrptSeA.py b/source/train/DescrptSeA.py index 0cfb70d48c..aa5221d7c7 100644 --- a/source/train/DescrptSeA.py +++ b/source/train/DescrptSeA.py @@ -74,8 +74,7 @@ def compute_dstats (self, data_box, data_atype, natoms_vec, - mesh, - reuse = None) : + mesh) : all_davg = [] all_dstd = [] if True: @@ -86,7 +85,7 @@ def compute_dstats (self, suma2 = [] for cc,bb,tt,nn,mm in zip(data_coord,data_box,data_atype,natoms_vec,mesh) : sysr,sysr2,sysa,sysa2,sysn \ - = self._compute_dstats_sys_smth(cc,bb,tt,nn,mm,reuse) + = self._compute_dstats_sys_smth(cc,bb,tt,nn,mm) sumr.append(sysr) suma.append(sysa) sumn.append(sysn) @@ -141,12 +140,12 @@ def build (self, davg.shape, dtype = global_tf_float_precision, trainable = False, - initializer = tf.constant_initializer(davg, dtype = global_tf_float_precision)) + initializer = tf.constant_initializer(davg)) self.t_std = tf.get_variable('t_std', dstd.shape, dtype = global_tf_float_precision, trainable = False, - initializer = tf.constant_initializer(dstd, dtype = global_tf_float_precision)) + initializer = tf.constant_initializer(dstd)) coord = tf.reshape (coord_, [-1, natoms[1] * 3]) box = tf.reshape (box_, [-1, 9]) @@ -229,8 +228,7 @@ def _compute_dstats_sys_smth (self, data_box, data_atype, natoms_vec, - mesh, - reuse = None) : + mesh) : avg_zero = np.zeros([self.ntypes,self.ndescrpt]).astype(global_np_float_precision) std_ones = np.ones ([self.ntypes,self.ndescrpt]).astype(global_np_float_precision) sub_graph = tf.Graph() diff --git a/source/train/DescrptSeAR.py b/source/train/DescrptSeAR.py index cdd634b85e..929b660248 100644 --- a/source/train/DescrptSeAR.py +++ b/source/train/DescrptSeAR.py @@ -54,10 +54,9 @@ def compute_dstats (self, data_box, data_atype, natoms_vec, - mesh, - reuse = None) : - davg_a, dstd_a = self.descrpt_a.compute_dstats(data_coord, data_box, data_atype, natoms_vec, mesh, reuse) - davg_r, dstd_r = self.descrpt_r.compute_dstats(data_coord, data_box, data_atype, natoms_vec, mesh, reuse) + mesh) : + davg_a, dstd_a = self.descrpt_a.compute_dstats(data_coord, data_box, data_atype, natoms_vec, mesh) + davg_r, dstd_r = self.descrpt_r.compute_dstats(data_coord, data_box, data_atype, natoms_vec, mesh) return [davg_a, davg_r], [dstd_a, dstd_r] diff --git a/source/train/DescrptSeR.py b/source/train/DescrptSeR.py index 31eae9528e..b057a0afd3 100644 --- a/source/train/DescrptSeR.py +++ b/source/train/DescrptSeR.py @@ -67,8 +67,7 @@ def compute_dstats (self, data_box, data_atype, natoms_vec, - mesh, - reuse = None) : + mesh) : all_davg = [] all_dstd = [] sumr = [] @@ -76,7 +75,7 @@ def compute_dstats (self, sumr2 = [] for cc,bb,tt,nn,mm in zip(data_coord,data_box,data_atype,natoms_vec,mesh) : sysr,sysr2,sysn \ - = self._compute_dstats_sys_se_r(cc,bb,tt,nn,mm,reuse) + = self._compute_dstats_sys_se_r(cc,bb,tt,nn,mm) sumr.append(sysr) sumn.append(sysn) sumr2.append(sysr2) @@ -121,12 +120,12 @@ def build (self, davg.shape, dtype = global_tf_float_precision, trainable = False, - initializer = tf.constant_initializer(davg, dtype = 
global_tf_float_precision)) + initializer = tf.constant_initializer(davg)) self.t_std = tf.get_variable('t_std', dstd.shape, dtype = global_tf_float_precision, trainable = False, - initializer = tf.constant_initializer(dstd, dtype = global_tf_float_precision)) + initializer = tf.constant_initializer(dstd)) coord = tf.reshape (coord_, [-1, natoms[1] * 3]) box = tf.reshape (box_, [-1, 9]) @@ -194,8 +193,7 @@ def _compute_dstats_sys_se_r (self, data_box, data_atype, natoms_vec, - mesh, - reuse = None) : + mesh) : avg_zero = np.zeros([self.ntypes,self.ndescrpt]).astype(global_np_float_precision) std_ones = np.ones ([self.ntypes,self.ndescrpt]).astype(global_np_float_precision) sub_graph = tf.Graph() diff --git a/source/train/Fitting.py b/source/train/Fitting.py index 37269d0a9a..59ea7a5ed8 100644 --- a/source/train/Fitting.py +++ b/source/train/Fitting.py @@ -20,22 +20,70 @@ def __init__ (self, jdata, descrpt): self.dim_descrpt = descrpt.get_dim_out() args = ClassArg()\ .add('numb_fparam', int, default = 0)\ + .add('numb_aparam', int, default = 0)\ .add('neuron', list, default = [120,120,120], alias = 'n_neuron')\ .add('resnet_dt', bool, default = True)\ .add('seed', int) class_data = args.parse(jdata) self.numb_fparam = class_data['numb_fparam'] + self.numb_aparam = class_data['numb_aparam'] self.n_neuron = class_data['neuron'] self.resnet_dt = class_data['resnet_dt'] self.seed = class_data['seed'] self.useBN = False # data requirement if self.numb_fparam > 0 : - add_data_requirement('fparam', self.numb_fparam, atomic=False, must=False, high_prec=False) + add_data_requirement('fparam', self.numb_fparam, atomic=False, must=True, high_prec=False) + self.fparam_avg = None + self.fparam_std = None + self.fparam_inv_std = None + if self.numb_aparam > 0: + add_data_requirement('aparam', self.numb_aparam, atomic=True, must=True, high_prec=False) + self.aparam_avg = None + self.aparam_std = None + self.aparam_inv_std = None def get_numb_fparam(self) : return self.numb_fparam + def get_numb_aparam(self) : + return self.numb_aparam + + def compute_dstats(self, all_stat, protection): + # stat fparam + if self.numb_fparam > 0: + cat_data = np.concatenate(all_stat['fparam'], axis = 0) + cat_data = np.reshape(cat_data, [-1, self.numb_fparam]) + self.fparam_avg = np.average(cat_data, axis = 0) + self.fparam_std = np.std(cat_data, axis = 0) + for ii in range(self.fparam_std.size): + if self.fparam_std[ii] < protection: + self.fparam_std[ii] = protection + self.fparam_inv_std = 1./self.fparam_std + # stat aparam + if self.numb_aparam > 0: + sys_sumv = [] + sys_sumv2 = [] + sys_sumn = [] + for ss_ in all_stat['aparam'] : + ss = np.reshape(ss_, [-1, self.numb_aparam]) + sys_sumv.append(np.sum(ss, axis = 0)) + sys_sumv2.append(np.sum(np.multiply(ss, ss), axis = 0)) + sys_sumn.append(ss.shape[0]) + sumv = np.sum(sys_sumv, axis = 0) + sumv2 = np.sum(sys_sumv2, axis = 0) + sumn = np.sum(sys_sumn) + self.aparam_avg = (sumv)/sumn + self.aparam_std = self._compute_std(sumv2, sumv, sumn) + for ii in range(self.aparam_std.size): + if self.aparam_std[ii] < protection: + self.aparam_std[ii] = protection + self.aparam_inv_std = 1./self.aparam_std + + def _compute_std (self, sumv2, sumv, sumn) : + return np.sqrt(sumv2/sumn - np.multiply(sumv/sumn, sumv/sumn)) + + def build (self, inputs, input_dict, @@ -43,10 +91,41 @@ def build (self, bias_atom_e = None, reuse = None, suffix = '') : + if self.numb_fparam > 0 and ( self.fparam_avg is None or self.fparam_inv_std is None ): + raise RuntimeError('No data stat result. 
one should do data statistics before building') + if self.numb_aparam > 0 and ( self.aparam_avg is None or self.aparam_inv_std is None ): + raise RuntimeError('No data stat result. one should do data statistics before building') + with tf.variable_scope('fitting_attr' + suffix, reuse = reuse) : t_dfparam = tf.constant(self.numb_fparam, name = 'dfparam', dtype = tf.int32) + t_daparam = tf.constant(self.numb_aparam, + name = 'daparam', + dtype = tf.int32) + if self.numb_fparam > 0: + t_fparam_avg = tf.get_variable('t_fparam_avg', + self.numb_fparam, + dtype = global_tf_float_precision, + trainable = False, + initializer = tf.constant_initializer(self.fparam_avg)) + t_fparam_istd = tf.get_variable('t_fparam_istd', + self.numb_fparam, + dtype = global_tf_float_precision, + trainable = False, + initializer = tf.constant_initializer(self.fparam_inv_std)) + if self.numb_aparam > 0: + t_aparam_avg = tf.get_variable('t_aparam_avg', + self.numb_aparam, + dtype = global_tf_float_precision, + trainable = False, + initializer = tf.constant_initializer(self.aparam_avg)) + t_aparam_istd = tf.get_variable('t_aparam_istd', + self.numb_aparam, + dtype = global_tf_float_precision, + trainable = False, + initializer = tf.constant_initializer(self.aparam_inv_std)) + start_index = 0 inputs = tf.reshape(inputs, [-1, self.dim_descrpt * natoms[0]]) shape = inputs.get_shape().as_list() @@ -54,25 +133,40 @@ def build (self, if bias_atom_e is not None : assert(len(bias_atom_e) == self.ntypes) + if self.numb_fparam > 0 : + fparam = input_dict['fparam'] + fparam = tf.reshape(fparam, [-1, self.numb_fparam]) + fparam = (fparam - t_fparam_avg) * t_fparam_istd + if self.numb_aparam > 0 : + aparam = input_dict['aparam'] + aparam = tf.reshape(aparam, [-1, self.numb_aparam]) + aparam = (aparam - t_aparam_avg) * t_aparam_istd + aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]]) + for type_i in range(self.ntypes): # cut-out inputs inputs_i = tf.slice (inputs, [ 0, start_index* self.dim_descrpt], [-1, natoms[2+type_i]* self.dim_descrpt] ) inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt]) + layer = inputs_i + if self.numb_fparam > 0 : + ext_fparam = tf.tile(fparam, [1, natoms[2+type_i]]) + ext_fparam = tf.reshape(ext_fparam, [-1, self.numb_fparam]) + layer = tf.concat([layer, ext_fparam], axis = 1) + if self.numb_aparam > 0 : + ext_aparam = tf.slice(aparam, + [ 0, start_index * self.numb_aparam], + [-1, natoms[2+type_i] * self.numb_aparam]) + ext_aparam = tf.reshape(ext_aparam, [-1, self.numb_aparam]) + layer = tf.concat([layer, ext_aparam], axis = 1) start_index += natoms[2+type_i] + if bias_atom_e is None : type_bias_ae = 0.0 else : type_bias_ae = bias_atom_e[type_i] - layer = inputs_i - if self.numb_fparam > 0 : - fparam = input_dict['fparam'] - ext_fparam = tf.reshape(fparam, [-1, self.numb_fparam]) - ext_fparam = tf.tile(ext_fparam, [1, natoms[2+type_i]]) - ext_fparam = tf.reshape(ext_fparam, [-1, self.numb_fparam]) - layer = tf.concat([layer, ext_fparam], axis = 1) for ii in range(0,len(self.n_neuron)) : if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] : layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt) @@ -117,6 +211,9 @@ def get_sel_type(self): def get_wfc_numb(self): return self.wfc_numb + def get_out_size(self): + return self.wfc_numb * 3 + def build (self, input_d, rot_mat, @@ -189,6 +286,9 @@ def __init__ (self, jdata, descrpt) : def get_sel_type(self): return self.sel_type + def 
get_out_size(self): + return 9 + def build (self, input_d, rot_mat, @@ -252,12 +352,14 @@ def __init__ (self, jdata, descrpt) : args = ClassArg()\ .add('neuron', list, default = [120,120,120], alias = 'n_neuron')\ .add('resnet_dt', bool, default = True)\ + .add('fit_diag', bool, default = True)\ .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'pol_type')\ .add('seed', int) class_data = args.parse(jdata) self.n_neuron = class_data['neuron'] self.resnet_dt = class_data['resnet_dt'] self.sel_type = class_data['sel_type'] + self.fit_diag = class_data['fit_diag'] self.seed = class_data['seed'] self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1() self.dim_rot_mat = self.dim_rot_mat_1 * 3 @@ -266,6 +368,9 @@ def __init__ (self, jdata, descrpt) : def get_sel_type(self): return self.sel_type + def get_out_size(self): + return 9 + def build (self, input_d, rot_mat, @@ -297,12 +402,20 @@ def build (self, layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt) else : layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed) - # (nframes x natoms) x (naxis x naxis) - final_layer = one_layer(layer, self.dim_rot_mat_1*self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed) - # (nframes x natoms) x naxis x naxis - final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.dim_rot_mat_1, self.dim_rot_mat_1]) - # (nframes x natoms) x naxis x naxis - final_layer = final_layer + tf.transpose(final_layer, perm = [0,2,1]) + if self.fit_diag : + # (nframes x natoms) x naxis + final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed) + # (nframes x natoms) x naxis + final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.dim_rot_mat_1]) + # (nframes x natoms) x naxis x naxis + final_layer = tf.matrix_diag(final_layer) + else : + # (nframes x natoms) x (naxis x naxis) + final_layer = one_layer(layer, self.dim_rot_mat_1*self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed) + # (nframes x natoms) x naxis x naxis + final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.dim_rot_mat_1, self.dim_rot_mat_1]) + # (nframes x natoms) x naxis x naxis + final_layer = final_layer + tf.transpose(final_layer, perm = [0,2,1]) # (nframes x natoms) x naxis x 3(coord) final_layer = tf.matmul(final_layer, rot_mat_i) # (nframes x natoms) x 3(coord) x 3(coord) @@ -318,3 +431,104 @@ def build (self, count += 1 return tf.reshape(outs, [-1]) + + +class GlobalPolarFittingSeA () : + def __init__ (self, jdata, descrpt) : + if not isinstance(descrpt, DescrptSeA) : + raise RuntimeError('GlobalPolarFittingSeA only supports DescrptSeA') + self.ntypes = descrpt.get_ntypes() + self.dim_descrpt = descrpt.get_dim_out() + self.polar_fitting = PolarFittingSeA(jdata, descrpt) + + def get_sel_type(self): + return self.polar_fitting.get_sel_type() + + def get_out_size(self): + return self.polar_fitting.get_out_size() + + def build (self, + input_d, + rot_mat, + natoms, + reuse = None, + suffix = '') : + inputs = tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]) + outs = self.polar_fitting.build(input_d, rot_mat, natoms, reuse, suffix) + # nframes x natoms x 9 
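+ # each selected atom contributes one flattened 3x3 polarizability; the
+ # reshape below recovers the atom axis and the reduce_sum collapses it,
+ # so a single global tensor per frame is returned, matching the
+ # frame-level (non-atomic) polarizability labels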
+ outs = tf.reshape(outs, [tf.shape(inputs)[0], -1, 9]) + outs = tf.reduce_sum(outs, axis = 1) + return tf.reshape(outs, [-1]) + + +class DipoleFittingSeA () : + def __init__ (self, jdata, descrpt) : + if not isinstance(descrpt, DescrptSeA) : + raise RuntimeError('DipoleFittingSeA only supports DescrptSeA') + self.ntypes = descrpt.get_ntypes() + self.dim_descrpt = descrpt.get_dim_out() + args = ClassArg()\ + .add('neuron', list, default = [120,120,120], alias = 'n_neuron')\ + .add('resnet_dt', bool, default = True)\ + .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'dipole_type')\ + .add('seed', int) + class_data = args.parse(jdata) + self.n_neuron = class_data['neuron'] + self.resnet_dt = class_data['resnet_dt'] + self.sel_type = class_data['sel_type'] + self.seed = class_data['seed'] + self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1() + self.dim_rot_mat = self.dim_rot_mat_1 * 3 + self.useBN = False + + def get_sel_type(self): + return self.sel_type + + def build (self, + input_d, + rot_mat, + natoms, + reuse = None, + suffix = '') : + start_index = 0 + inputs = tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]) + rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]]) + shape = inputs.get_shape().as_list() + + count = 0 + for type_i in range(self.ntypes): + # cut-out inputs + inputs_i = tf.slice (inputs, + [ 0, start_index* self.dim_descrpt], + [-1, natoms[2+type_i]* self.dim_descrpt] ) + inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt]) + rot_mat_i = tf.slice (rot_mat, + [ 0, start_index* self.dim_rot_mat], + [-1, natoms[2+type_i]* self.dim_rot_mat] ) + rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3]) + start_index += natoms[2+type_i] + if not type_i in self.sel_type : + continue + layer = inputs_i + for ii in range(0,len(self.n_neuron)) : + if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] : + layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt) + else : + layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed) + # (nframes x natoms) x naxis + final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed) + # (nframes x natoms) x 1 * naxis + final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], 1, self.dim_rot_mat_1]) + # (nframes x natoms) x 1 x 3(coord) + final_layer = tf.matmul(final_layer, rot_mat_i) + # nframes x natoms x 3 + final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i], 3]) + + # concat the results + if count == 0: + outs = final_layer + else: + outs = tf.concat([outs, final_layer], axis = 1) + count += 1 + + return tf.reshape(outs, [-1]) diff --git a/source/train/Local.py b/source/train/Local.py index 97abbefe68..d2564ff17c 100644 --- a/source/train/Local.py +++ b/source/train/Local.py @@ -5,6 +5,9 @@ def get_resource (): nodelist = [nodename] gpus = os.getenv('CUDA_VISIBLE_DEVICES') if gpus is not None : - gpus = gpus.split(",") - gpus = [int(ii) for ii in gpus] + if gpus != "" : + gpus = gpus.split(",") + gpus = [int(ii) for ii in gpus] + else : + gpus = [] return nodename, nodelist, gpus diff --git a/source/train/Loss.py b/source/train/Loss.py index d12a79d7ec..6d9abf7026 100644 --- a/source/train/Loss.py +++ b/source/train/Loss.py @@ -22,7 +22,8 @@ def __init__ (self, jdata, **kwarg) : 
.add('start_pref_ae', float, default = 0)\ .add('limit_pref_ae', float, default = 0)\ .add('start_pref_pf', float, default = 0)\ - .add('limit_pref_pf', float, default = 0) + .add('limit_pref_pf', float, default = 0)\ + .add('relative_f', float) class_data = args.parse(jdata) self.start_pref_e = class_data['start_pref_e'] self.limit_pref_e = class_data['limit_pref_e'] @@ -34,6 +35,7 @@ def __init__ (self, jdata, **kwarg) : self.limit_pref_ae = class_data['limit_pref_ae'] self.start_pref_pf = class_data['start_pref_pf'] self.limit_pref_pf = class_data['limit_pref_pf'] + self.relative_f = class_data['relative_f'] self.has_e = (self.start_pref_e != 0 or self.limit_pref_e != 0) self.has_f = (self.start_pref_f != 0 or self.limit_pref_f != 0) self.has_v = (self.start_pref_v != 0 or self.limit_pref_v != 0) @@ -72,8 +74,15 @@ def build (self, force_reshape = tf.reshape (force, [-1]) force_hat_reshape = tf.reshape (force_hat, [-1]) atom_pref_reshape = tf.reshape (atom_pref, [-1]) - l2_force_loss = tf.reduce_mean (tf.square(force_hat_reshape - force_reshape), name = "l2_force_" + suffix) - l2_pref_force_loss = tf.reduce_mean (tf.multiply(tf.square(force_hat_reshape - force_reshape), atom_pref_reshape), name = "l2_pref_force_" + suffix) + diff_f = force_hat_reshape - force_reshape + if self.relative_f is not None: + force_hat_3 = tf.reshape(force_hat, [-1, 3]) + norm_f = tf.reshape(tf.norm(force_hat_3, axis = 1), [-1, 1]) + self.relative_f + diff_f_3 = tf.reshape(diff_f, [-1, 3]) + diff_f_3 = diff_f_3 / norm_f + diff_f = tf.reshape(diff_f_3, [-1]) + l2_force_loss = tf.reduce_mean(tf.square(diff_f), name = "l2_force_" + suffix) + l2_pref_force_loss = tf.reduce_mean(tf.multiply(tf.square(diff_f), atom_pref_reshape), name = "l2_pref_force_" + suffix) virial_reshape = tf.reshape (virial, [-1]) virial_hat_reshape = tf.reshape (virial_hat, [-1]) @@ -166,57 +175,8 @@ def print_on_training(self, if self.has_pf: print_str += prop_fmt % (np.sqrt(error_pf_test) / natoms[0], np.sqrt(error_pf_train) / natoms[0]) - return print_str - - - -class WFCLoss () : - def __init__ (self, jdata, **kwarg) : - model = kwarg['model'] - # data required - add_data_requirement('wfc', - model.get_wfc_numb() * 3, - atomic=True, - must=True, - high_prec=False, - type_sel = model.get_sel_type()) - - def build (self, - learning_rate, - natoms, - model_dict, - label_dict, - suffix): - wfc_hat = label_dict['wfc'] - wfc = model_dict['wfc'] - l2_loss = tf.reduce_mean( tf.square(wfc - wfc_hat), name='l2_'+suffix) - self.l2_l = l2_loss - more_loss = {} + return print_str - return l2_loss, more_loss - - def print_header(self) : - prop_fmt = ' %9s %9s' - print_str = '' - print_str += prop_fmt % ('l2_tst', 'l2_trn') - return print_str - - def print_on_training(self, - sess, - natoms, - feed_dict_test, - feed_dict_batch) : - error_test\ - = sess.run([self.l2_l], \ - feed_dict=feed_dict_test) - error_train\ - = sess.run([self.l2_l], \ - feed_dict=feed_dict_batch) - print_str = "" - prop_fmt = " %9.2e %9.2e" - print_str += prop_fmt % (np.sqrt(error_test), np.sqrt(error_train)) - - return print_str class TensorLoss () : @@ -229,13 +189,14 @@ def __init__ (self, jdata, **kwarg) : self.tensor_name = kwarg['tensor_name'] self.tensor_size = kwarg['tensor_size'] self.label_name = kwarg['label_name'] + self.atomic = kwarg.get('atomic', True) # data required add_data_requirement(self.label_name, self.tensor_size, - atomic=True, + atomic=self.atomic, must=True, high_prec=False, - type_sel = model.get_sel_type()) + type_sel = type_sel) def build (self, 
learning_rate, @@ -246,6 +207,9 @@ def build (self, polar_hat = label_dict[self.label_name] polar = model_dict[self.tensor_name] l2_loss = tf.reduce_mean( tf.square(polar - polar_hat), name='l2_'+suffix) + if not self.atomic : + atom_norm = 1./ global_cvt_2_tf_float(natoms[0]) + l2_loss = l2_loss * atom_norm self.l2_l = l2_loss more_loss = {} diff --git a/source/train/Model.py b/source/train/Model.py index 3f7c61cd79..9cbb8e1123 100644 --- a/source/train/Model.py +++ b/source/train/Model.py @@ -36,11 +36,15 @@ def __init__ (self, jdata, descrpt, fitting): args = ClassArg()\ .add('type_map', list, default = []) \ .add('rcond', float, default = 1e-3) \ + .add('data_stat_nbatch', int, default = 10) \ + .add('data_stat_protect',float, default = 1e-2) \ .add('use_srtab', str) class_data = args.parse(jdata) self.type_map = class_data['type_map'] self.srtab_name = class_data['use_srtab'] self.rcond = class_data['rcond'] + self.data_stat_nbatch = class_data['data_stat_nbatch'] + self.data_stat_protect = class_data['data_stat_protect'] if self.srtab_name is not None : self.srtab = TabInter(self.srtab_name) args.add('smin_alpha', float, must = True)\ @@ -66,29 +70,24 @@ def get_type_map (self) : def data_stat(self, data): all_stat = defaultdict(list) for ii in range(data.get_nsystems()) : - stat_data = data.get_batch (sys_idx = ii) - for dd in stat_data: - if dd == "natoms_vec": - stat_data[dd] = stat_data[dd].astype(np.int32) - all_stat[dd].append(stat_data[dd]) - - self._compute_dstats (all_stat['coord'], - all_stat['box'], - all_stat['type'], - all_stat['natoms_vec'], - all_stat['default_mesh']) + for jj in range(self.data_stat_nbatch) : + stat_data = data.get_batch (sys_idx = ii) + for dd in stat_data: + if dd == "natoms_vec": + stat_data[dd] = stat_data[dd].astype(np.int32) + all_stat[dd].append(stat_data[dd]) + self._compute_dstats (all_stat, protection = self.data_stat_protect) self.bias_atom_e = data.compute_energy_shift(self.rcond) - def _compute_dstats (self, - data_coord, - data_box, - data_atype, - natoms_vec, - mesh, - reuse = None) : + def _compute_dstats (self, all_stat, protection = 1e-2) : self.davg, self.dstd \ - = self.descrpt.compute_dstats(data_coord, data_box, data_atype, natoms_vec, mesh, reuse) + = self.descrpt.compute_dstats(all_stat['coord'], + all_stat['box'], + all_stat['type'], + all_stat['natoms_vec'], + all_stat['default_mesh']) + self.fitting.compute_dstats(all_stat, protection = protection) def build (self, coord_, @@ -223,12 +222,10 @@ def build (self, return model_dict - - -class WFCModel() : - model_type = 'wfc' - def __init__ (self, jdata, descrpt, fitting): +class TensorModel() : + def __init__ (self, jdata, descrpt, fitting, var_name): + self.model_type = var_name self.descrpt = descrpt self.rcut = self.descrpt.get_rcut() self.ntypes = self.descrpt.get_ntypes() @@ -236,10 +233,12 @@ def __init__ (self, jdata, descrpt, fitting): self.fitting = fitting args = ClassArg()\ - .add('type_map', list, default = []) + .add('type_map', list, default = []) \ + .add('data_stat_nbatch', int, default = 10) class_data = args.parse(jdata) self.type_map = class_data['type_map'] - + self.data_stat_nbatch = class_data['data_stat_nbatch'] + def get_rcut (self) : return self.rcut @@ -249,37 +248,30 @@ def get_ntypes (self) : def get_type_map (self) : return self.type_map + def get_sel_type(self): + return self.fitting.get_sel_type() + + def get_out_size (self) : + return self.fitting.get_out_size() + def data_stat(self, data): all_stat = defaultdict(list) for ii in 
range(data.get_nsystems()) : - stat_data = data.get_batch (sys_idx = ii) - for dd in stat_data: - if dd == "natoms_vec": - stat_data[dd] = stat_data[dd].astype(np.int32) - all_stat[dd].append(stat_data[dd]) - - self._compute_dstats (all_stat['coord'], - all_stat['box'], - all_stat['type'], - all_stat['natoms_vec'], - all_stat['default_mesh']) - - - def _compute_dstats (self, - data_coord, - data_box, - data_atype, - natoms_vec, - mesh, - reuse = None) : + for jj in range(self.data_stat_nbatch) : + stat_data = data.get_batch (sys_idx = ii) + for dd in stat_data: + if dd == "natoms_vec": + stat_data[dd] = stat_data[dd].astype(np.int32) + all_stat[dd].append(stat_data[dd]) + self._compute_dstats (all_stat) + + def _compute_dstats (self, all_stat) : self.davg, self.dstd \ - = self.descrpt.compute_dstats(data_coord, data_box, data_atype, natoms_vec, mesh, reuse) - - def get_sel_type(self): - return self.fitting.get_sel_type() - - def get_wfc_numb(self): - return self.fitting.get_wfc_numb() + = self.descrpt.compute_dstats(all_stat['coord'], + all_stat['box'], + all_stat['type'], + all_stat['natoms_vec'], + all_stat['default_mesh']) def build (self, coord_, @@ -290,7 +282,6 @@ def build (self, input_dict, suffix = '', reuse = None): - with tf.variable_scope('model_attr' + suffix, reuse = reuse) : t_tmap = tf.constant(' '.join(self.type_map), name = 'tmap', @@ -319,113 +310,33 @@ def build (self, rot_mat = self.descrpt.get_rot_mat() rot_mat = tf.identity(rot_mat, name = 'o_rot_mat') - wfc = self.fitting.build (dout, - rot_mat, - natoms, - reuse = reuse, - suffix = suffix) - wfc = tf.identity(wfc, name = 'o_wfc') - - return {'wfc': wfc} + output = self.fitting.build (dout, + rot_mat, + natoms, + reuse = reuse, + suffix = suffix) + output = tf.identity(output, name = 'o_' + self.model_type) + return {self.model_type: output} -class PolarModel() : - model_type = 'polar' - def __init__ (self, jdata, descrpt, fitting): - self.descrpt = descrpt - self.rcut = self.descrpt.get_rcut() - self.ntypes = self.descrpt.get_ntypes() - # fitting - self.fitting = fitting +class WFCModel(TensorModel): + def __init__(self, jdata, descrpt, fitting) : + TensorModel.__init__(self, jdata, descrpt, fitting, 'wfc') - args = ClassArg()\ - .add('type_map', list, default = []) - class_data = args.parse(jdata) - self.type_map = class_data['type_map'] - def get_rcut (self) : - return self.rcut +class DipoleModel(TensorModel): + def __init__(self, jdata, descrpt, fitting) : + TensorModel.__init__(self, jdata, descrpt, fitting, 'dipole') - def get_ntypes (self) : - return self.ntypes - def get_type_map (self) : - return self.type_map +class PolarModel(TensorModel): + def __init__(self, jdata, descrpt, fitting) : + TensorModel.__init__(self, jdata, descrpt, fitting, 'polar') - def data_stat(self, data): - all_stat = defaultdict(list) - for ii in range(data.get_nsystems()) : - stat_data = data.get_batch (sys_idx = ii) - for dd in stat_data: - if dd == "natoms_vec": - stat_data[dd] = stat_data[dd].astype(np.int32) - all_stat[dd].append(stat_data[dd]) - - self._compute_dstats (all_stat['coord'], - all_stat['box'], - all_stat['type'], - all_stat['natoms_vec'], - all_stat['default_mesh']) - - - def _compute_dstats (self, - data_coord, - data_box, - data_atype, - natoms_vec, - mesh, - reuse = None) : - self.davg, self.dstd \ - = self.descrpt.compute_dstats(data_coord, data_box, data_atype, natoms_vec, mesh, reuse) - - def get_sel_type(self): - return self.fitting.get_sel_type() - - def build (self, - coord_, - atype_, - natoms, - box, - 
mesh, - input_dict, - suffix = '', - reuse = None): - - with tf.variable_scope('model_attr' + suffix, reuse = reuse) : - t_tmap = tf.constant(' '.join(self.type_map), - name = 'tmap', - dtype = tf.string) - t_st = tf.constant(self.get_sel_type(), - name = 'sel_type', - dtype = tf.int32) - t_mt = tf.constant(self.model_type, - name = 'model_type', - dtype = tf.string) - - coord = tf.reshape (coord_, [-1, natoms[1] * 3]) - atype = tf.reshape (atype_, [-1, natoms[1]]) - - dout \ - = self.descrpt.build(coord_, - atype_, - natoms, - box, - mesh, - davg = self.davg, - dstd = self.dstd, - suffix = suffix, - reuse = reuse) - dout = tf.identity(dout, name='o_descriptor') - rot_mat = self.descrpt.get_rot_mat() - rot_mat = tf.identity(rot_mat, name = 'o_rot_mat') - polar = self.fitting.build (dout, - rot_mat, - natoms, - reuse = reuse, - suffix = suffix) - polar = tf.identity(polar, name = 'o_polar') +class GlobalPolarModel(TensorModel): + def __init__(self, jdata, descrpt, fitting) : + TensorModel.__init__(self, jdata, descrpt, fitting, 'global_polar') - return {'polar': polar} diff --git a/source/train/Trainer.py b/source/train/Trainer.py index 804a5f0aea..10472d828b 100644 --- a/source/train/Trainer.py +++ b/source/train/Trainer.py @@ -12,13 +12,13 @@ from deepmd.RunOptions import global_ener_float_precision from deepmd.RunOptions import global_cvt_2_tf_float from deepmd.RunOptions import global_cvt_2_ener_float -from deepmd.Fitting import EnerFitting, WFCFitting, PolarFittingLocFrame, PolarFittingSeA +from deepmd.Fitting import EnerFitting, WFCFitting, PolarFittingLocFrame, PolarFittingSeA, GlobalPolarFittingSeA, DipoleFittingSeA from deepmd.DescrptLocFrame import DescrptLocFrame from deepmd.DescrptSeA import DescrptSeA from deepmd.DescrptSeR import DescrptSeR from deepmd.DescrptSeAR import DescrptSeAR -from deepmd.Model import Model, WFCModel, PolarModel -from deepmd.Loss import EnerStdLoss, WFCLoss, TensorLoss +from deepmd.Model import Model, WFCModel, DipoleModel, PolarModel, GlobalPolarModel +from deepmd.Loss import EnerStdLoss, TensorLoss from deepmd.LearningRate import LearningRateExp from tensorflow.python.framework import ops @@ -94,6 +94,11 @@ def _init_param(self, jdata): self.fitting = EnerFitting(fitting_param, self.descrpt) elif fitting_type == 'wfc': self.fitting = WFCFitting(fitting_param, self.descrpt) + elif fitting_type == 'dipole': + if descrpt_type == 'se_a': + self.fitting = DipoleFittingSeA(fitting_param, self.descrpt) + else : + raise RuntimeError('fitting dipole only supports descriptors: se_a') elif fitting_type == 'polar': if descrpt_type == 'loc_frame': self.fitting = PolarFittingLocFrame(fitting_param, self.descrpt) @@ -101,6 +106,11 @@ def _init_param(self, jdata): self.fitting = PolarFittingSeA(fitting_param, self.descrpt) else : raise RuntimeError('fitting polar only supports descrptors: loc_frame and se_a') + elif fitting_type == 'global_polar': + if descrpt_type == 'se_a': + self.fitting = GlobalPolarFittingSeA(fitting_param, self.descrpt) + else : + raise RuntimeError('fitting global_polar only supports descriptors: se_a') else : raise RuntimeError('unknow fitting type ' + fitting_type) @@ -108,10 +118,14 @@ def _init_param(self, jdata): # infer model type by fitting_type if fitting_type == Model.model_type: self.model = Model(model_param, self.descrpt, self.fitting) - elif fitting_type == WFCModel.model_type: + elif fitting_type == 'wfc': self.model = WFCModel(model_param, self.descrpt, self.fitting) - elif fitting_type == PolarModel.model_type: + 
elif fitting_type == 'dipole': + self.model = DipoleModel(model_param, self.descrpt, self.fitting) + elif fitting_type == 'polar': self.model = PolarModel(model_param, self.descrpt, self.fitting) + elif fitting_type == 'global_polar': + self.model = GlobalPolarModel(model_param, self.descrpt, self.fitting) else : raise RuntimeError('get unknown fitting type when building model') @@ -135,13 +149,30 @@ def _init_param(self, jdata): if fitting_type == 'ener': self.loss = EnerStdLoss(loss_param, starter_learning_rate = self.lr.start_lr()) elif fitting_type == 'wfc': - self.loss = WFCLoss(loss_param, model = self.model) + self.loss = TensorLoss(loss_param, + model = self.model, + tensor_name = 'wfc', + tensor_size = self.model.get_out_size(), + label_name = 'wfc') + elif fitting_type == 'dipole': + self.loss = TensorLoss(loss_param, + model = self.model, + tensor_name = 'dipole', + tensor_size = 3, + label_name = 'dipole') elif fitting_type == 'polar': self.loss = TensorLoss(loss_param, model = self.model, tensor_name = 'polar', tensor_size = 9, label_name = 'polarizability') + elif fitting_type == 'global_polar': + self.loss = TensorLoss(loss_param, + model = self.model, + tensor_name = 'global_polar', + tensor_size = 9, + atomic = False, + label_name = 'polarizability') else : raise RuntimeError('get unknown fitting type when building loss function') diff --git a/source/train/env.py b/source/train/env.py index 859ada34bf..22630724ff 100644 --- a/source/train/env.py +++ b/source/train/env.py @@ -18,9 +18,15 @@ def set_env_if_empty(key, value): def set_mkl(): """Tuning MKL for the best performance https://www.tensorflow.org/guide/performance/overview + + Fixing an issue in numpy built by MKL. See + https://github.com/ContinuumIO/anaconda-issues/issues/11367 + https://github.com/numpy/numpy/issues/12374 """ - if 'mkl_info' in np.__config__.__dict__: + # check whether the numpy is built by mkl, see + # https://github.com/numpy/numpy/issues/14751 + if 'mkl_rt' in np.__config__.get_info("blas_mkl_info").get('libraries', []): set_env_if_empty("KMP_BLOCKTIME", "0") set_env_if_empty("KMP_AFFINITY", "granularity=fine,verbose,compact,1,0") reload(np) diff --git a/source/train/test.py b/source/train/test.py index 074be65a31..83f882e32b 100755 --- a/source/train/test.py +++ b/source/train/test.py @@ -10,6 +10,7 @@ from deepmd.Data import DeepmdData from deepmd import DeepEval from deepmd import DeepPot +from deepmd import DeepDipole from deepmd import DeepPolar from deepmd import DeepWFC from tensorflow.python.framework import ops @@ -18,6 +19,8 @@ def test (args): de = DeepEval(args.model) if de.model_type == 'ener': test_ener(args) + elif de.model_type == 'dipole': + test_dipole(args) elif de.model_type == 'polar': test_polar(args) elif de.model_type == 'wfc': @@ -42,7 +45,15 @@ def test_ener (args) : coord = test_data["coord"][:numb_test].reshape([numb_test, -1]) box = test_data["box"][:numb_test] atype = test_data["type"][0] - energy, force, virial, ae, av = dp.eval(coord, box, atype, fparam = (test_data["fparam"] if "fparam" in test_data else None), atomic = True) + if dp.get_dim_fparam() > 0: + fparam = test_data["fparam"][:numb_test] + else : + fparam = None + if dp.get_dim_aparam() > 0: + aparam = test_data["aparam"][:numb_test] + else : + aparam = None + energy, force, virial, ae, av = dp.eval(coord, box, atype, fparam = fparam, aparam = aparam, atomic = True) energy = energy.reshape([numb_test,1]) force = force.reshape([numb_test,-1]) virial = virial.reshape([numb_test,9]) @@ -146,3 +157,36 @@ 
def test_polar (args) : axis = 1) np.savetxt(detail_file+".out", pe, header = 'data_pxx data_pxy data_pxz data_pyx data_pyy data_pyz data_pzx data_pzy data_pzz pred_pxx pred_pxy pred_pxz pred_pyx pred_pyy pred_pyz pred_pzx pred_pzy pred_pzz') + + +def test_dipole (args) : + if args.rand_seed is not None : + np.random.seed(args.rand_seed % (2**32)) + + dp = DeepDipole(args.model) + data = DeepmdData(args.system, args.set_prefix, shuffle_test = args.shuffle_test) + data.add('dipole', 3, atomic=True, must=True, high_prec=False, type_sel = dp.get_sel_type()) + test_data = data.get_test () + numb_test = args.numb_test + natoms = len(test_data["type"][0]) + nframes = test_data["box"].shape[0] + numb_test = min(nframes, numb_test) + + coord = test_data["coord"][:numb_test].reshape([numb_test, -1]) + box = test_data["box"][:numb_test] + atype = test_data["type"][0] + dipole = dp.eval(coord, box, atype) + + dipole = dipole.reshape([numb_test,-1]) + l2f = (l2err (dipole - test_data["dipole"] [:numb_test])) + + print ("# number of test data : %d " % numb_test) + print ("Dipole L2err : %e" % l2f) + + detail_file = args.detail_file + if detail_file is not None : + pe = np.concatenate((np.reshape(test_data["dipole"][:numb_test], [-1,3]), + np.reshape(dipole, [-1,3])), + axis = 1) + np.savetxt(detail_file+".out", pe, header = 'data_x data_y data_z pred_x pred_y pred_z')
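For reference, a minimal sketch of how the atomic-parameter path added to `DeepPot.eval` above can be driven from python. The model file `graph.pb`, the toy coordinates, and the zero-valued aparam are hypothetical; only the call signature and the accepted aparam sizes follow the code in this patch.
```python
import numpy as np
from deepmd import DeepPot

# hypothetical model frozen with numb_aparam > 0, e.g. trained from
# source/tests/water_se_a_aparam.json
dp = DeepPot('graph.pb')

nframes, natoms = 2, 3
coords = np.random.rand(nframes, natoms * 3)
cells = np.tile(10.0 * np.eye(3).reshape([1, 9]), [nframes, 1])
atom_types = [0, 1, 1]

# aparam may be sized nframes x natoms x fdim, natoms x fdim (tiled over
# frames), or fdim (tiled over frames and atoms), per the checks in eval()
fdim = dp.get_dim_aparam()
aparam = np.zeros([nframes, natoms, fdim])
energy, force, virial = dp.eval(coords, cells, atom_types, aparam = aparam)
```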
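Likewise, the `relative_f` option introduced in `Loss.py` is easier to read outside of TensorFlow. In this numpy restatement of the same arithmetic (the function name is hypothetical), the 3-component force residual of each atom is divided by the norm of the matching `force_hat` entry plus the `relative_f` offset before squaring, turning the force loss into a relative error with a floor on the denominator.
```python
import numpy as np

def scale_force_residual(force_hat, force, relative_f):
    # force_hat, force: (natoms, 3); relative_f: scalar offset that keeps
    # the denominator away from zero for near-zero forces
    diff = force_hat - force
    norm = np.linalg.norm(force_hat, axis = 1, keepdims = True) + relative_f
    return diff / norm

f_hat = np.array([[0.0, 0.0, 2.0], [0.1, 0.0, 0.0]])
f_prd = np.array([[0.0, 0.0, 1.8], [0.2, 0.0, 0.0]])
# the error on the small force is no longer drowned out by the large one
print(scale_force_residual(f_hat, f_prd, relative_f = 0.1))
```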