diff --git a/.gitignore b/.gitignore
index ebc0e2a..cdf8a95 100644
--- a/.gitignore
+++ b/.gitignore
@@ -131,6 +131,7 @@ dmypy.json
.pyre/
# experiments
+working/
save/
dataset/
kit/
@@ -139,3 +140,19 @@ glove/
body_models/
.vscode/
wandb/
+
+# vim swaps
+.*.sw?
+
+# tarballs and zips
+*.tar
+*.tar.gz
+*.tgz
+*.zip
+*.gz
+*.7z
+*.bzip
+*.bz2
+
+# numpy arrays
+*.np[yz]
diff --git a/README.md b/README.md
index 7228dd0..8c76ec6 100644
--- a/README.md
+++ b/README.md
@@ -29,6 +29,7 @@ If you find this code useful in your research, please cite:
## Getting started
This code was developed on `Ubuntu 20.04 LTS` with Python 3.7, CUDA 11.7 and PyTorch 1.13.1.
+The current `requirements.txt` was set up with Python 3.9, CUDA 11.3, PyTorch 1.12.1.
### 1. Setup environment
@@ -46,12 +47,10 @@ This codebase shares a large part of its base dependencies with [GMD](https://gi
Setup virtual env:
```shell
-python3 -m venv .env_condmdi
-source .env_condmdi/bin/activate
-pip uninstall ffmpeg
-pip install spacy
-python -m spacy download en_core_web_sm
-pip install git+https://github.com/openai/CLIP.git
+python3 -m venv .env_condmdi # pick your preferred name here
+source .env_condmdi/bin/activate # and use that name in place of .env_condmdi
+pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113
+pip install -r requirements.txt # updated to include spacy and clip configuration
```
Download dependencies:
@@ -78,36 +77,40 @@ bash prepare/download_recognition_unconstrained_models.sh
### 2. Get data
There are two paths to get the data:
-(a) **Generation only** wtih pretrained text-to-motion model without training or evaluating
-
-(b) **Get full data** to train and evaluate the model.
-
+
+ (a) **Generation only** with pretrained text-to-motion model without training or evaluating
-#### a. Generation only (text only)
+ #### a. Generation only (text only)
-**HumanML3D** - Clone HumanML3D, then copy the data dir to our repository:
+ **HumanML3D** - Clone HumanML3D, then copy the data dir to our repository:
-```shell
-cd ..
-git clone https://github.com/EricGuo5513/HumanML3D.git
-unzip ./HumanML3D/HumanML3D/texts.zip -d ./HumanML3D/HumanML3D/
-cp -r HumanML3D/HumanML3D diffusion-motion-inbetweening/dataset/HumanML3D
-cd diffusion-motion-inbetweening
-cp -a dataset/HumanML3D_abs/. dataset/HumanML3D/
-```
+ ```shell
+ cd ..
+ git clone https://github.com/EricGuo5513/HumanML3D.git
+ unzip ./HumanML3D/HumanML3D/texts.zip -d ./HumanML3D/HumanML3D/
+ cp -r HumanML3D/HumanML3D CondMDI/dataset/HumanML3D
+ cd CondMDI
+ cp -a dataset/HumanML3D_abs/. dataset/HumanML3D/
+ ```
+
+
+ (b) **Get full data** to train and evaluate the model.
-#### b. Full data (text + motion capture)
+ #### b. Full data (text + motion capture)
-**[Important !]**
-Following GMD, the representation of the root joint has been changed from relative to absolute. Therefore, you need to replace the original files and run GMD's version of `motion_representation.ipynb` and `cal_mean_variance.ipynb` provided in `./HumanML3D_abs/` instead to get the absolute-root data.
+ **HumanML3D** - Follow the instructions in [HumanML3D](https://github.com/EricGuo5513/HumanML3D.git),
+ then copy the resulting dataset to our repository:
-**HumanML3D** - Follow the instructions in [HumanML3D](https://github.com/EricGuo5513/HumanML3D.git),
-then copy the result dataset to our repository:
+ **[Important !]**
+ Following GMD, the representation of the root joint has been changed from relative to absolute. Therefore, when setting up HumanML3D, please
+ run GMD's version of `motion_representation.ipynb` and `cal_mean_variance.ipynb` instead to get the absolute-root data. These files are made
+ available in `./dataset/HumanML3D_abs/`.
-```shell
-cp -r ../HumanML3D/HumanML3D ./dataset/HumanML3D
-```
+ ```shell
+ cp -r ../HumanML3D/HumanML3D ./dataset/HumanML3D
+ ```
+
### 3. Download the pretrained models
@@ -156,7 +159,7 @@ python -m sample.conditional_synthesis --model_path ./save/condmdi_randomframes/
Text to Motion - With keyframe conditioning
### Generate from a single prompt - condition on keyframe locations
-#### using the uncoditioned model
+#### using the unconditioned model
```shell
python -m sample.edit --model_path ./save/condmdi_uncond/model000500000.pt --edit_mode benchmark_sparse --transition_length 5 --num_samples 10 --num_repetitions 3 --imputate --stop_imputation_at 1 --reconstruction_guidance --reconstruction_weight 20 --text_condition "a person throws a ball"
```
@@ -189,7 +192,7 @@ python -m sample.conditional_synthesis --model_path ./save/condmdi_randomframes/
* `--device` id.
* `--seed` to sample different prompts.
* `--motion_length` (text-to-motion only) in seconds (maximum is 9.8[sec]).
-* `--progress` to save the denosing progress.
+* `--progress` to save the denoising progress.
**Running those will get you:**
* `results.npy` file with text prompts and xyz positions of the generated animation
@@ -227,11 +230,11 @@ Our model is trained on the **HumanML3D** dataset.
```shell
python -m train.train_condmdi --keyframe_conditioned
```
-* You can ramove `--keyframe_conditioned` to train a unconditioned model.
+* You can remove `--keyframe_conditioned` to train an unconditioned model.
* Use `--device` to define GPU id.
## Evaluate
-All evaluation are done on the HumanML3D dataset.
+All evaluations are done on the HumanML3D dataset.
### Text to Motion - With keyframe conditioning
@@ -247,7 +250,7 @@ python -m eval.eval_humanml_condmdi --model_path ./save/condmdi_uncond/model0005
#### on the conditional model
```shell
-python -m eval.eval_humanml_condmdi --model_path ./save/condmdi_randomframes/model000750000.pt --edit_mode gmd_keyframes --keyframe_guidance_param 1.
+python -m eval.eval_humanml_condmdi --model_path ./save/condmdi_randomframes/model000750000.pt --edit_mode gmd_keyframes --keyframe_guidance_param 1
```
@@ -260,4 +263,4 @@ We would like to thank the following contributors for the great foundation that
## License
This code is distributed under an [MIT LICENSE](LICENSE).
-Note that our code depends on other libraries, including CLIP, SMPL, SMPL-X, PyTorch3D, and uses datasets that each have their own respective licenses that must also be followed.
\ No newline at end of file
+Note that our code depends on other libraries, including CLIP, SMPL, SMPL-X, PyTorch3D, and uses datasets that each have their own respective licenses that must also be followed.
diff --git a/configs/card.py b/configs/card.py
index 9d83440..61684ab 100644
--- a/configs/card.py
+++ b/configs/card.py
@@ -75,6 +75,12 @@ class motion_abs_unet_adagn_xl(
):
save_dir: str = 'save/unet_adazero_xl_x0_abs_loss1_fp16_clipwd_224'
+@dataclass
+class motion_abs_unet_adagn_xl_custom_batch(
+ data.humanml_motion_abs,
+ model.motion_unet_adagn_xl,
+):
+ batch_size: int = 2 ## change the batch size here
@dataclass
class motion_abs_unet_adagn_xl_loss2(
diff --git a/data_loaders/amass_utils.py b/data_loaders/amass_utils.py
index e71c0b3..ebb4067 100644
--- a/data_loaders/amass_utils.py
+++ b/data_loaders/amass_utils.py
@@ -4,35 +4,35 @@
# Matrix that shows joint correspondces to SMPL features
-MAT_POS = np.zeros((24, 764), dtype=np.bool)
+MAT_POS = np.zeros((24, 764), dtype=bool)
MAT_POS[0, :3] = True # root position = trans
for joint_idx in range(24):
ub = 3 + 24*3*3 + 3 * (joint_idx + 1)
lb = ub - 3
MAT_POS[joint_idx, lb:ub] = True # joint position = pos
-MAT_ROTMAT = np.zeros((24, 764), dtype=np.bool) # rotmat = 24,3,3 wrp to the parent joint
+MAT_ROTMAT = np.zeros((24, 764), dtype=bool) # rotmat = 24,3,3 wrp to the parent joint
for joint_idx in range(24):
ub = 3 + 3*3 * (joint_idx + 1)
lb = ub - 9
MAT_ROTMAT[joint_idx, lb:ub] = True # joint rotation = rotmat
-MAT_HEIGHT = np.zeros((24, 764), dtype=np.bool) # height = 24
+MAT_HEIGHT = np.zeros((24, 764), dtype=bool) # height = 24
for joint_idx in range(24):
ub = 3 + 24*3*3 + 24*3 + 24*3 + 8 + (joint_idx + 1)
lb = ub - 1
MAT_HEIGHT[joint_idx, lb:ub] = True # joint rotation = rotmat
-MAT_ROT6D = np.zeros((24, 764), dtype=np.bool) # rot2d = 24,2 wrp to the parent joint
+MAT_ROT6D = np.zeros((24, 764), dtype=bool) # rot2d = 24,2 wrp to the parent joint
for joint_idx in range(24):
ub = 3 + 24*3*3 + 24*3 + 24*3 + 8 + 24 + 3 + 24*3 + 24*6 + 6 + 6 * (joint_idx + 1)
lb = ub - 6
MAT_ROT6D[joint_idx, lb:ub] = True # joint rotation = rotmat
-MAT_ROT = np.zeros((24, 764), dtype=np.bool) # global_xform = 24, 6 wrp to the root
+MAT_ROT = np.zeros((24, 764), dtype=bool) # global_xform = 24, 6 wrp to the root
lb = 3 + 24*3*3 + 24*3 + 24*3 + 8 + 24 + 3 + 24*3 + 24*6
MAT_ROT[0, lb:lb+6] = True # root rotation = root_orient
for joint_idx in range(24):
ub = 3 + 24*3*3 + 24*3 + 24*3 + 8 + 24 + 3 + 24*3 + (joint_idx + 1) * 6
lb = ub - 6
- MAT_ROT[joint_idx, lb:ub] = True # joint rotation = global_xform
\ No newline at end of file
+ MAT_ROT[joint_idx, lb:ub] = True # joint rotation = global_xform
diff --git a/data_loaders/custom/README.md b/data_loaders/custom/README.md
new file mode 100644
index 0000000..7530daf
--- /dev/null
+++ b/data_loaders/custom/README.md
@@ -0,0 +1,419 @@
+# Working with Custom Rigs
+
+This is the general workflow for training and inference on custom rigs:
+
+1. convert your input animations into BVH
+2. convert BVH into numpy arrays
+3. process the numpy arrays to get rot6d vectors with absolute-root values
+4. annotate the text describing the animations
+5. modify script parameters to include this custom rig
+6. train the system
+7. do the inference
+8. convert the output numpy arrays back to BVH
+
+The "HumanML3D" workflow does step 3, then 6 and 7.
+
+
+## Introduction
+
+The [Flexible Motion In-Betweening][condmdi] model is trained on the [HumanML3D dataset][hml3d_fork],
+originally by [Eric Guo][hml3d_orig], which is a combination of various motion-capture sequences that
+all use the same SMPL-based 22-joint skeleton. In order to train on a custom rig, we must specify the
+joints of the rig and edit the training scripts wherever that skeleton is assumed.
+
+This is the original workflow to obtain the HumanML3D dataset, summarized from the README there:
+
+
+## Original Workflow for HumanML3D:
+
+1. Download the various datasets from [AMASS][amass] then unzip them into the `amass_data/` folder in
+ the HumanML3D repository. Next, download `SMPL+H` models from [MANO][mano] and `DMPLS` models from
+ the [SMPL][smpl] sites. Unzip these and put them in the `body_models/` folder. Each of these sites
+ requires an account to be created before you download anything.
+2. Run `raw_pose_processing.ipynb` on the data. This extracts poses from the AMASS data.
+3. Run the absolute-root versions of `motion_representation.ipynb` and `cal_mean_variance.ipynb`. If you
+   cloned the [original][hml3d_orig] repo, copy the notebooks from the `HumanML3D_abs/` folder
+   in [CondMDI][condmdi] to the root of the HumanML3D repo, then run those. In the [fork][hml3d_fork]
+   the notebooks are already the absolute-root versions; the original notebooks have the prefix `rel_`.
+4. Copy the processed data directory `HumanML3D/` into `dataset/`. The sequence data can now be found
+ in `new_joints_abs_3d/`, with the converted data in `new_joint_vecs_abs_3d/`.
+
+[amass]: https://amass.is.tue.mpg.de/download.php
+[smpl]: https://smpl.is.tue.mpg.de/download.php
+[mano]: https://mano.is.tue.mpg.de/download.php
+[condmdi]: https://github.com/icedwater/CondMDI
+[hml3d_fork]: https://github.com/icedwater/HumanML3D
+[hml3d_orig]: https://github.com/EricGuo5513/HumanML3D
+
+
+## Preparing a custom dataset for training
+
+Make sure a corresponding set of `$DATASET/joints` and `$DATASET/vecs` is present.
+Each sequence nd-array in `joints` should have dimensions F x J x 3, where F is the
+number of frames, J is the number of joints in the rig, and 3 is the coordinates of
+each joint. The `vecs` arrays should have dimensions F x (12J - 1) as per Appendix A
+of the [paper][condpaper]. In the normal `HumanML3D` workflow these are generated by
+`motion_representation.ipynb`, but if we already have the joints, we only need to run
+`build_vectors.py`, which constructs the `vecs` arrays.
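+
+As a quick sanity check on a prepared dataset, something like the sketch below can be used.
+The `$DATASET` location, the `myrig` name, and the joint count are assumptions for the sake
+of illustration, not fixed values:
+
+```python
+import numpy as np
+from pathlib import Path
+
+DATASET = Path("dataset/myrig")   # hypothetical dataset root
+J = 22                            # number of joints in your rig
+
+for joints_file in sorted((DATASET / "joints").glob("*.npy")):
+    joints = np.load(joints_file)                          # expected shape: (F, J, 3)
+    vecs = np.load(DATASET / "vecs" / joints_file.name)    # expected shape: (F, 12*J - 1)
+    assert joints.ndim == 3 and joints.shape[1:] == (J, 3), joints_file
+    assert vecs.ndim == 2 and vecs.shape[-1] == 12 * J - 1, joints_file
+```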
+
+Each sequence must be accompanied by a text file containing some captions with the
+following format:
+
+ caption#tokens#from_tag#to_tag
+
+where `caption` describes one action in the sequence, `tokens` is the caption in a
+tokenised form, and the part of the sequence described by the caption is delimited
+by `from_tag` and `to_tag` (in seconds). If both tags are 0.0, the whole sequence is
+used. In the HumanML3D data, mirrored captions are saved in files whose names start
+with `M`:
+
+ $ cat 003157.txt (truncated)
+ a person makes a turn to the right.#a/DET person/NOUN make/VERB a/DET turn/VERB to/ADP the/DET right/NOUN#0.0#0.0
+ $ cat M003157.txt (truncated)
+ a person makes a turn to the left.#a/DET person/NOUN make/VERB a/DET turn/VERB to/ADP the/DET left/NOUN#0.0#0.0
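+
+The dataset loader (`data/dataset.py` in this directory) splits each such line on `#`. A
+minimal stand-alone sketch of that parsing (the file path is hypothetical) looks like this:
+
+```python
+def parse_caption_line(line: str) -> dict:
+    """Split one annotation line into caption, tokens and the tagged time span."""
+    caption, tokens, f_tag, to_tag = line.strip().split('#')
+    return {
+        'caption': caption,
+        'tokens': tokens.split(' '),   # e.g. ['a/DET', 'person/NOUN', ...]
+        'from_tag': float(f_tag),      # 0.0 / 0.0 means "use the whole sequence"
+        'to_tag': float(to_tag),
+    }
+
+with open('dataset/myrig/texts/003157.txt') as f:   # hypothetical path
+    annotations = [parse_caption_line(line) for line in f if line.strip()]
+```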
+
+We can use `annotate_texts.py` to annotate actions described in `$DATASET/texts/`.
+
+Finally, we can compute the mean and variance arrays using `cal_mean_variance.py`,
+adapted from the notebook of the same name.
+
+[condpaper]: https://arxiv.org/html/2405.11126v2#A1
+
+Before training starts, `$DATASET` should have sub-directories `joints` and `vecs`
+containing the raw and preprocessed sequences, and a corresponding `texts` holding
+the descriptions of those actions.
+
+
+## Training with the custom dataset
+
+This is a summary of the steps to train on a custom rig called "myrig":
+
+1. Copy the `data_loaders/custom` directory to `data_loaders/myrig`.
+2. Update the dataset info for `myrig` in `data_loaders/myrig/data/dataset.py`.
+3. Update `data_loaders/get_data.py`.
+4. Update `data_loaders/myrig/scripts/motion_process.py`.
+5. Update `data_loaders/myrig_utils.py`.
+6. Update `utils/model_util.py`.
+7. Update `utils/paramUtil.py`.
+8. Update `utils/editing_util.py`.
+9. Update `model/mdm_unet.py`.
+10. Update `utils/get_opt.py`.
+11. (Optional) Customize the training options in `configs/card.py`.
+12. Now the training can be performed. For example, to train for 1 million steps with a checkpoint
+    every 200K steps, run the following command:
+
+```bash
+python -m train.train_condmdi --dataset myrig --save_interval 200_000 --num_steps 1_000_000 --device 0 --keyframe_conditioned
+```
+
+
+## Doing inference with the custom dataset
+
+In this case we are only handling conditional synthesis.
+
+1. Update `sample/conditional_synthesis.py`.
+
+
+The details of each step are highlighted below.
+
+
+### Create a new data_loader class called `myrig`
+
+Copy the `data_loaders/custom` directory to a new directory and call it `data_loaders/myrig`.
+
+
+### Update dataset info for `myrig` in `data_loaders/myrig/data/dataset.py`
+
+This file contains the specific settings for this rig.
+
+ - create new subclass `myrig` from data.Dataset here with specific settings
+ - /dataset/humanml_opt.txt is loaded as `opt` and `self.opt` within subclass
+ - import necessary dependencies (ignore t2m?)
+ - Text2MotionDatasetV2 and TextOnlyDataset depend on `--joints_num`, include that
+ - train t2m for custom rig here (make sure your training data is longer than `min_motion_len`)
+ - min_motion_len = 5 for t2m, else 24 (sequences below 5 frames are skipped)
+ - update the feet and facing joints in `motion_to_rel_data` and `motion_to_abs_data`
+ - start and end joints of left foot and right foot
+ - facing joints are Z-shape: right hip, left hip, right shoulder, left shoulder
+ - update the njoints in `sample_to_motion`, `abs3d_to_rel`, and `rel_to_abs3d`
+ - 22 is the default value for the HumanML3D dataset.
+
+
+### Update `data_loaders/get_data.py`
+
+This file contains the list of classes which can be used to create the model and the diffusion,
+both of which are used during training and inference. So we need to add `myrig` to the lists in
+both `get_dataset_class` and `get_dataset`.
+
+This means a new class called `myrig` needs to be built based on the default `HumanML3D` class.
+We can use `CustomRig` as a template. Once that is done, import it in `get_dataset_class`.
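+
+As a rough sketch of the `get_dataset_class` half of that registration (the existing branches
+and import paths may differ, and `Myrig` is a hypothetical name for the class built from the
+`CustomRig` template):
+
+```python
+# sketch of the extra branch in data_loaders/get_data.py
+def get_dataset_class(name):
+    if name == "humanml":
+        from data_loaders.humanml.data.dataset import HumanML3D
+        return HumanML3D
+    elif name == "myrig":
+        from data_loaders.myrig.data.dataset import Myrig   # subclass based on CustomRig
+        return Myrig
+    else:
+        raise ValueError(f'Unsupported dataset name [{name}]')
+```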
+
+
+### Update `data_loaders/myrig/scripts/motion_process.py`
+
+Some convenience values are hardcoded at the top of this file. In future, we should import them
+from `utils/paramUtil.py` directly, but refactoring will not be done just yet.
+
+These are:
+
+  - the leg joints `l_idx1` and `l_idx2` (which may be the same as the hip joints `r_hip` and `l_hip`),
+  - the right-foot and left-foot index arrays `fid_r` and `fid_l`,
+  - `face_joint_indx`, the face-joints vector: a 'Z' traced from the right hip to the left hip,
+    then to the right upper arm and the left upper arm, and
+  - `joints_num`, the number of joints in the rig.
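+
+For illustration only, with a hypothetical small rig these constants at the top of the file might
+look like the sketch below; the actual indices must match your own joint ordering:
+
+```python
+# hypothetical values for a small custom rig -- adjust to your joint ordering
+joints_num = 7
+
+# leg joints (may coincide with the hip joints r_hip / l_hip on simple rigs)
+l_idx1, l_idx2 = 2, 6
+
+# right-foot and left-foot joint indices used for foot-contact detection
+fid_r = [2]
+fid_l = [6]
+
+# the 'Z' trace: right hip, left hip, right shoulder, left shoulder
+face_joint_indx = [2, 6, 3, 5]
+```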
+
+Also check that `custom_raw_offsets` and `custom_kinematic_chain` from `paramUtil` are used in
+`process_file`. This function uses `data_dir` to generate `global_positions`, but retaining its
+original value of `./dataset/000021.npy` somehow still allows custom rig training.
+
+`recover_rot` hardcodes `joints_num` for HumanML3D and KIT only, but this function is currently
+not in use and may be removed in the future. The same applies to the `main()` block which tries
+to process KIT data.
+
+
+### Update `data_loaders/myrig_utils.py`
+The training process also requires some utilities that are currently only defined for HumanML3D
+and AMASS classes. Make sure `data_loaders/myrig_utils.py` exists and has the updated values.
+
+Here, the matrices for `myrig` are initialized to the same shape as the computed joint vectors,
+so that they can be imported into `utils/editing_util.py` for `joint_to_full_mask_custom`.
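+
+A sketch of what one such mask might look like for a hypothetical 7-joint rig, following the
+feature layout used by the dataset code (4 root features first, then the local positions of the
+remaining joints); the real `myrig_utils.py` should mirror `amass_utils.py` for your own rig:
+
+```python
+import numpy as np
+
+J = 7                    # hypothetical joint count for myrig
+FEAT = 12 * J - 1        # per-frame feature length
+
+# joint-to-feature correspondence mask, analogous to MAT_POS in amass_utils.py
+MAT_POS = np.zeros((J, FEAT), dtype=bool)
+MAT_POS[0, :4] = True    # root block: rotation velocity, linear velocity, height
+for joint_idx in range(1, J):
+    lb = 4 + 3 * (joint_idx - 1)
+    MAT_POS[joint_idx, lb:lb + 3] = True   # local position block of this joint
+```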
+
+
+### Update `utils/model_util.py`
+
+Make sure `myrig` is imported and included in the list of `Datasets`.
+
+Both training and inference require the `create_model_and_diffusion` function, so updating this
+will help both sides of the process. Since we are just reusing the UNET architecture which they
+built for the HumanML3D data, we don't need to change anything here.
+
+However, it depends on `get_model_args`, which requires the dataset name and `njoints`, the
+per-frame feature count corresponding to 12\*J - 1, where J is the actual number of joints in
+the rig, as defined in the appendix of [the paper][condpaper].
+
+For example, with `humanml`, since the rig has 22 joints, `njoints` is set to 263.
+
+
+### Update `utils/paramUtil.py`
+
+ - create a `kinematic_chain` and `raw_offset` corresponding to `myrig`, for example:
+ ```
+ 0
+ |
+ 5--1--3
+ | | |
+ 6 2 4
+ ```
+
+ - `myrig_kinematic_chain`: a list of joint chains, here: [[0, 1, 2], [1, 3, 4], [1, 5, 6]].
+ Each sublist represents a single chain of joints (see above for explanation.)
+ - `myrig_raw_offset`: numpy array of relative displacement of each node from its parent, in
+ `[x,y,z]` order. In the above example, 0 is at the top, 3 and 5 are on the right and left
+ of 1, and nodes 1, 2, 4, 6 are each below their parent. This gives us:
+ ```
+ myrig_raw_offset = np.array(
+ [
+ [ 0, 0, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [ 1, 0, 0],
+ [ 0,-1, 0],
+ [-1, 0, 0],
+ [ 0,-1, 0]
+ ])
+ ```
+
+Include the above structures in `paramUtil.py` for the [HumanML3D][hml3d_fork] scripts, so that
+the custom `myrig` data can be preprocessed with `build_vectors.py`.
+
+
+### Update `utils/editing_util.py`
+
+Make sure that the `from data_loaders` import line includes `myrig_utils`.
+
+Then update the `joint_to_full_mask_custom` function with the correct import name. Ideally, the
+`joint_to_full_mask` function should be refactored to take the rig type as an argument, but for
+now we use a separate function.
+
+
+### Update `model/mdm_unet.py`
+
+Here, we have to manually add the details of `myrig` into the `MDM_UNET` class in a few places.
+
+Since we want keyframe conditioning, add `myrig` in the if/elif block in the constructor as one
+of the possible values for `self.dataset`. Set the value of `added_channels` to (12\*J - 1) for
+the rig to be processed properly.
+
+Since we want to use our text as well, add `myrig` to the `self.dataset` list in `encode_text`.
+It is not yet clear how the maximum token length affects training, but we reuse the values from
+the previous classes.
+
+In `forward()` an `assert` is used to limit the rig types that can use this model. Add `myrig`.
+
+Then make sure in `forward_core()` that the correct shape for `myrig` is used by adding `myrig`
+and the corresponding number of `njoints` to the if/elif block for keyframe conditioning.
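+
+Both the constructor and `forward_core()` changes boil down to mapping the dataset name to its
+per-frame feature count. A hedged sketch (the real code in `mdm_unet.py` is likely structured
+differently):
+
+```python
+def keyframe_feature_channels(dataset: str, joints_num: int) -> int:
+    """Per-frame feature count used as added_channels for keyframe conditioning (sketch)."""
+    if dataset in ('humanml', 'myrig'):
+        return 12 * joints_num - 1          # 263 for the 22-joint HumanML3D rig
+    raise ValueError(f'keyframe conditioning is not set up for dataset [{dataset}]')
+
+added_channels = keyframe_feature_channels('myrig', joints_num=7)   # hypothetical rig
+```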
+
+
+### Update `utils/get_opt.py`
+
+The `get_opt()` function reads training arguments from `./dataset/humanml_opt.txt`, which we
+can reuse. Note that `dataset_name` is set to `t2m` here and `max_text_len` is 20. The
+`data_root` and `data_dir` options should point to the preprocessed vector data. Make sure the
+values of `joints_num` and `dim_pose` are correctly defined: `dim_pose` should be computed from
+the number of joints J as (12\*J - 1), and J is now read from `humanml_opt.txt`.
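+
+A minimal sketch of the arithmetic being described (a hypothetical helper, not a function that
+exists in the codebase):
+
+```python
+def dim_pose_for(joints_num: int) -> int:
+    """Per-frame feature dimension as described in Appendix A of the paper: 12*J - 1."""
+    return 12 * joints_num - 1
+
+assert dim_pose_for(22) == 263   # the HumanML3D default in humanml_opt.txt
+```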
+
+After running `annotate_texts.py`, point `text_dir` to the directory with the processed texts.
+
+
+### (Optional) Customize the training options in `configs/card.py`.
+
+If you need to permanently change some training options, you can create a new dataclass card in
+`configs/card.py` subclassing the default data and model settings and giving the updated values
+there, for instance to set `batch_size` to 2:
+
+```python
+@dataclass
+class motion_abs_unet_adagn_xl_custom_batch( # this class name should go to train_args()
+ data.humanml_motion_abs, # this is the current default for absolute motion data
+ model.motion_unet_adagn_xl, # this is the current default unet training setting
+):
+ batch_size: int = 2 ## change the batch size here
+```
+`utils/parser_util.py` contains all the options you can override this way.
+
+If you use this method, pass the class name via `train_args(base_cls=$CARD_NAME)` in
+`train/train_condmdi.py` before you run it. To change settings one-off, just use command-line
+options instead:
+
+```bash
+python -m train.train_condmdi --dataset myrig [... as above ...] --batch_size 2
+```
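+
+For reference, the card-based route is a one-line change where the training arguments are
+parsed; a sketch, with import paths assumed from the repository layout:
+
+```python
+from configs import card
+from utils.parser_util import train_args
+
+# pick the custom card instead of the default one in train/train_condmdi.py
+args = train_args(base_cls=card.motion_abs_unet_adagn_xl_custom_batch)
+```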
+
+
+## Using the trained custom model for inference
+
+Currently, we only use conditional synthesis. Add `myrig` to `sample/conditional_synthesis.py`.
+There is an `assert` in `main()` that checks the model name, as well as a place to specify
+`fps` or `max_frames` for `myrig`.
+
+Make sure that `args.dataset` checks for `myrig`. We can keep `hml_vec` for `model.data_rep` as
+this just reshapes the input without further manipulation. However, further down in `main()` we
+need to set `n_joints` to the correct value for `myrig`.
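+
+A sketch of the joint-count selection meant here; the fallback values follow the HumanML3D and
+KIT defaults, and the `myrig` count of 7 is just a placeholder:
+
+```python
+def joint_count(dataset: str, feature_dim: int) -> int:
+    """Number of xyz joints to recover from the model output (sketch only)."""
+    if dataset == 'myrig':
+        return 7                                 # hypothetical joint count of the custom rig
+    return 22 if feature_dim == 263 else 21      # HumanML3D vs KIT defaults
+
+n_joints = joint_count('myrig', feature_dim=12 * 7 - 1)
+```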
+
+If we want to generate `mp4` videos of the output as well, we can add `myrig` to the list where
+we check `args.dataset` and run `plot_conditional_samples`. However, this is not needed for the
+actual inference to run.
+
+Once all this is done, we can run the conditional synthesis as shown below:
+
+```bash
+python -m sample.conditional_synthesis \
+ --dataset="myrig" --model_path "./save/path/modelx.pt" --edit_mode benchmark_sparse \
+ --transition_length 100 --n_keyframes 3 --num_repetitions 10 --seed 199 \
+ --text_prompt "a man walks across the room, trips and stumbles, then squats down"
+```
+
+where `model_path` is the trained model checkpoint for this synthesis, `transition_length`
+is the gap between each of the `n_keyframes` taken from a random sequence identified using
+the `seed` value we set (here, 199), and `num_repetitions` is the number of trial sequences
+to generate using the `text_prompt` provided.
+
+To vary the strength of the reference action vs the text prompt, experiment with the value
+of `transition_length` and maybe consider setting `keyframe_guidance_param` (default value
+is 1) to above 2.5 as suggested [in this bug thread][sparse].
+
+[sparse]: https://github.com/setarehc/diffusion-motion-inbetweening/issues/5#issuecomment-2197243178
+
+
+## Producing Output
+
+The result of the synthesis is a numpy file, `results.npy`, which contains the generated
+sequences. Each of these sequences can be converted into BVH format using the
+[joints2bvh][momjoints] script from the momask project; other tools can then convert the
+BVH into formats such as FBX.
+
+[momjoints]: https://github.com/EricGuo5513/momask-codes
+
+- can the existing scripts convert arbitrary J-joint rigs to the correct form? (no, need to fix)
+- will need to update momask joints2bvh: convert() to use nonstandard rig as well
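+
+A minimal sketch of reading the output before conversion; the key names and array layout are
+assumptions based on how `results.npy` is described in the main README:
+
+```python
+import numpy as np
+
+results = np.load('path/to/results.npy', allow_pickle=True).item()   # hypothetical path
+motions = results['motion']    # assumed xyz positions, e.g. (num_samples, n_joints, 3, num_frames)
+texts = results['text']        # assumed text prompt for each sample
+lengths = results['lengths']   # assumed valid frame count per sample
+
+# each motions[i, ..., :lengths[i]] would then be handed to a joints2bvh-style converter
+```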
+
+----
+> end of document
+----
+
+# Working Notes to Explore
+
+
+## get_data.py
+- dataset:
+ - add new type in DataOptions
+ - where is keyframe_conditioned used in ModelOptions?
+ - get_data.py:
+ - get_dataset_class("classname")
+ - from data_loaders.CLASSNAME.data import CLASSNAME
+
+## From the Training Arguments dataclasses
+args = train_args(base_cls=card.motion_abs_unet_adagn_xl)
+ = HfArgumentParser(base_cls).parse_args_into_dataclasses()[0]
+
+--> does the base_cls affect any params?
+
+--> TrainArgs(BaseOptions, DataOptions, ModelOptions, DiffusionOptions, TrainingOptions)
+ - cuda: bool=True
+ - device: int=0
+ - seed: int=10
+
+ - dataset: str="humanml", ["humanml", "kit", "humanact12", "uestc", "amass"]
+ - data_dir: str="", check dataset defaults
+ - abs_3d: bool=False
+ - traj_only: bool=False
+ - xz_only: Tuple[int]=False??
+ - use_random_proj: bool=False
+ - random_proj_scale: float=10.0
+ - augment_type: str="none", ["none", "rot", "full"]
+ - std_scale_shift: Tuple[float]=(1.0, 0.0)
+ - drop_redundant: bool=False (if true, keep only 4 + 21*3)
+
+ - arch: str="trans_enc", check paper for arch types
+ - emb_trans_dec: bool=False (toggle inject condition as class token in trans_dec)
+ - layers: int=8
+ - latent_dim: int=512 (tf/gru width)
+ - ff_size: int=1024 (tf feedforward size)
+ - dim_mults: Tuple[float]=(2, 2, 2, 2) (channel multipliers for unet)
+ - unet_adagn: bool=True (adaptive group normalization for unet)
+ - unet_zero: bool=True (zero weight init for unet)
+ - out_mult: bool=1 (large variation feature multiplier for unet/tf)
+ - cond_mask_prob: float=0.1 (prob(mask cond during training) for cfg)
+ - keyframe_mask_prob: float=0.1 (prob(mask keyframe cond during training) for cfg)
+ - lambda_rcxyz: float=0.0, joint pos loss
+ - lambda_vel: float=0.0, joint vel loss
+ - lambda_fc: float=0.0, foot contact loss
+ - unconstrained: bool=False (training independent of text, action. only humanact12)
+ - keyframe_conditioned: bool=False (condition on keyframes. only hml3d)
+ - keyframe_selection_scheme: str="random_frames", ["random_frames", "random_joints", "random"]
+ - zero_keyframe_loss: bool=False (zero the loss over observed keyframe loss, or allow model to make predictions over observed keyframes if false)
+
+ - noise_schedule: str="cosine"
+ - diffusion_steps: int=1000, T in paper
+ - sigma_small: bool=True, what?
+ - predict_xstart: bool=True, what?
+ - use_ddim: bool=False, what?
+ - clip_range: float=6.0, range to clip what?
+
+ - save_dir: str=None
+ - overwrite: bool=False, true to reuse existing dir
+ - batch_size: int=64
+ - train_platform_type: str="NoPlatform", ["NoPlatform", "ClearmlPlatform", "TensorboardPlatform", "WandbPlatform"]
+ - lr: float=1e-4, learning rate
+ - weight_decay: float=0, optimizer weight decay
+ - grad_clip: float=0, gradient clip
+ - use_fp16: bool=False
+ - avg_model_beta: float=0, 0 means disabled
+ - adam_beta2: float=0.999
+ - lr_anneal_steps: int=0
+ - eval_batch_size: int=32, <>
+ - eval_split: str="test", ["val", "test"]
+ - eval_during_training: bool=False
+ - eval_rep_times: int=3, times to loop evaluation during training
+ - eval_num_samples: int=1_000, set to -1 to use all
+ - log_interval: int=1_000, N steps before losses should be logged
+ - save_interval: int=100_000, N steps to save checkpoint AND run evaluation if asked
+ - num_steps: int=1_200_000
+ - num_frames: int=60, frame limit ignored by hml3d and KIT (check what the value there is)
+ - resume_checkpoint: str="", continue training from checkpoint 'model_.pt'
+ - apply_zero_mask: bool=False
+ - traj_extra_weight: float=1.0, extra weight for what?
+ - time_weighted_loss: bool=False, what does this do?
+ - train_x0_as_eps: bool=False, what is x0 and what is eps?
diff --git a/data_loaders/custom/common/quaternion.py b/data_loaders/custom/common/quaternion.py
new file mode 100644
index 0000000..5051507
--- /dev/null
+++ b/data_loaders/custom/common/quaternion.py
@@ -0,0 +1,423 @@
+# Copyright (c) 2018-present, Facebook, Inc.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+
+import torch
+import numpy as np
+
+_EPS4 = np.finfo(float).eps * 4.0
+
+_FLOAT_EPS = np.finfo(float).eps
+
+# PyTorch-backed implementations
+def qinv(q):
+ assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)'
+ mask = torch.ones_like(q)
+ mask[..., 1:] = -mask[..., 1:]
+ return q * mask
+
+
+def qinv_np(q):
+ assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)'
+ return qinv(torch.from_numpy(q).float()).numpy()
+
+
+def qnormalize(q):
+ assert q.shape[-1] == 4, 'q must be a tensor of shape (*, 4)'
+ return q / torch.norm(q, dim=-1, keepdim=True)
+
+
+def qmul(q, r):
+ """
+ Multiply quaternion(s) q with quaternion(s) r.
+ Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions.
+ Returns q*r as a tensor of shape (*, 4).
+ """
+ assert q.shape[-1] == 4
+ assert r.shape[-1] == 4
+
+ original_shape = q.shape
+
+ # Compute outer product
+ terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4))
+
+ w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3]
+ x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2]
+ y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1]
+ z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0]
+ return torch.stack((w, x, y, z), dim=1).view(original_shape)
+
+
+def qrot(q, v):
+ """
+ Rotate vector(s) v about the rotation described by quaternion(s) q.
+ Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,
+ where * denotes any number of dimensions.
+ Returns a tensor of shape (*, 3).
+ """
+ assert q.shape[-1] == 4
+ assert v.shape[-1] == 3
+ assert q.shape[:-1] == v.shape[:-1]
+
+ original_shape = list(v.shape)
+ # print(q.shape)
+ q = q.contiguous().view(-1, 4)
+ v = v.contiguous().view(-1, 3)
+
+ qvec = q[:, 1:]
+ uv = torch.cross(qvec, v, dim=1)
+ uuv = torch.cross(qvec, uv, dim=1)
+ return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape)
+
+
+def qeuler(q, order, epsilon=0, deg=True):
+ """
+ Convert quaternion(s) q to Euler angles.
+ Expects a tensor of shape (*, 4), where * denotes any number of dimensions.
+ Returns a tensor of shape (*, 3).
+ """
+ assert q.shape[-1] == 4
+
+ original_shape = list(q.shape)
+ original_shape[-1] = 3
+ q = q.view(-1, 4)
+
+ q0 = q[:, 0]
+ q1 = q[:, 1]
+ q2 = q[:, 2]
+ q3 = q[:, 3]
+
+ if order == 'xyz':
+ x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
+ y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1 + epsilon, 1 - epsilon))
+ z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
+ elif order == 'yzx':
+ x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
+ y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))
+ z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1 + epsilon, 1 - epsilon))
+ elif order == 'zxy':
+ x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1 + epsilon, 1 - epsilon))
+ y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
+ z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q1 * q1 + q3 * q3))
+ elif order == 'xzy':
+ x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
+ y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))
+ z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1 + epsilon, 1 - epsilon))
+ elif order == 'yxz':
+ x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1 + epsilon, 1 - epsilon))
+ y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2 * (q1 * q1 + q2 * q2))
+ z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
+ elif order == 'zyx':
+ x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
+ y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1 + epsilon, 1 - epsilon))
+ z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
+ else:
+        raise ValueError(f'Unknown Euler order: {order}')
+
+ if deg:
+ return torch.stack((x, y, z), dim=1).view(original_shape) * 180 / np.pi
+ else:
+ return torch.stack((x, y, z), dim=1).view(original_shape)
+
+
+# Numpy-backed implementations
+
+def qmul_np(q, r):
+ q = torch.from_numpy(q).contiguous().float()
+ r = torch.from_numpy(r).contiguous().float()
+ return qmul(q, r).numpy()
+
+
+def qrot_np(q, v):
+ q = torch.from_numpy(q).contiguous().float()
+ v = torch.from_numpy(v).contiguous().float()
+ return qrot(q, v).numpy()
+
+
+def qeuler_np(q, order, epsilon=0, use_gpu=False):
+ if use_gpu:
+ q = torch.from_numpy(q).cuda().float()
+ return qeuler(q, order, epsilon).cpu().numpy()
+ else:
+ q = torch.from_numpy(q).contiguous().float()
+ return qeuler(q, order, epsilon).numpy()
+
+
+def qfix(q):
+ """
+ Enforce quaternion continuity across the time dimension by selecting
+ the representation (q or -q) with minimal distance (or, equivalently, maximal dot product)
+ between two consecutive frames.
+
+ Expects a tensor of shape (L, J, 4), where L is the sequence length and J is the number of joints.
+ Returns a tensor of the same shape.
+ """
+ assert len(q.shape) == 3
+ assert q.shape[-1] == 4
+
+ result = q.copy()
+ dot_products = np.sum(q[1:] * q[:-1], axis=2)
+ mask = dot_products < 0
+ mask = (np.cumsum(mask, axis=0) % 2).astype(bool)
+ result[1:][mask] *= -1
+ return result
+
+
+def euler2quat(e, order, deg=True):
+ """
+ Convert Euler angles to quaternions.
+ """
+ assert e.shape[-1] == 3
+
+ original_shape = list(e.shape)
+ original_shape[-1] = 4
+
+ e = e.view(-1, 3)
+
+ ## if euler angles in degrees
+ if deg:
+ e = e * np.pi / 180.
+
+ x = e[:, 0]
+ y = e[:, 1]
+ z = e[:, 2]
+
+ rx = torch.stack((torch.cos(x / 2), torch.sin(x / 2), torch.zeros_like(x), torch.zeros_like(x)), dim=1)
+ ry = torch.stack((torch.cos(y / 2), torch.zeros_like(y), torch.sin(y / 2), torch.zeros_like(y)), dim=1)
+ rz = torch.stack((torch.cos(z / 2), torch.zeros_like(z), torch.zeros_like(z), torch.sin(z / 2)), dim=1)
+
+ result = None
+ for coord in order:
+ if coord == 'x':
+ r = rx
+ elif coord == 'y':
+ r = ry
+ elif coord == 'z':
+ r = rz
+ else:
+            raise ValueError(f'Unknown axis in order: {coord}')
+ if result is None:
+ result = r
+ else:
+ result = qmul(result, r)
+
+ # Reverse antipodal representation to have a non-negative "w"
+ if order in ['xyz', 'yzx', 'zxy']:
+ result *= -1
+
+ return result.view(original_shape)
+
+
+def expmap_to_quaternion(e):
+ """
+ Convert axis-angle rotations (aka exponential maps) to quaternions.
+ Stable formula from "Practical Parameterization of Rotations Using the Exponential Map".
+ Expects a tensor of shape (*, 3), where * denotes any number of dimensions.
+ Returns a tensor of shape (*, 4).
+ """
+ assert e.shape[-1] == 3
+
+ original_shape = list(e.shape)
+ original_shape[-1] = 4
+ e = e.reshape(-1, 3)
+
+ theta = np.linalg.norm(e, axis=1).reshape(-1, 1)
+ w = np.cos(0.5 * theta).reshape(-1, 1)
+ xyz = 0.5 * np.sinc(0.5 * theta / np.pi) * e
+ return np.concatenate((w, xyz), axis=1).reshape(original_shape)
+
+
+def euler_to_quaternion(e, order):
+ """
+ Convert Euler angles to quaternions.
+ """
+ assert e.shape[-1] == 3
+
+ original_shape = list(e.shape)
+ original_shape[-1] = 4
+
+ e = e.reshape(-1, 3)
+
+ x = e[:, 0]
+ y = e[:, 1]
+ z = e[:, 2]
+
+ rx = np.stack((np.cos(x / 2), np.sin(x / 2), np.zeros_like(x), np.zeros_like(x)), axis=1)
+ ry = np.stack((np.cos(y / 2), np.zeros_like(y), np.sin(y / 2), np.zeros_like(y)), axis=1)
+ rz = np.stack((np.cos(z / 2), np.zeros_like(z), np.zeros_like(z), np.sin(z / 2)), axis=1)
+
+ result = None
+ for coord in order:
+ if coord == 'x':
+ r = rx
+ elif coord == 'y':
+ r = ry
+ elif coord == 'z':
+ r = rz
+ else:
+            raise ValueError(f'Unknown axis in order: {coord}')
+ if result is None:
+ result = r
+ else:
+ result = qmul_np(result, r)
+
+ # Reverse antipodal representation to have a non-negative "w"
+ if order in ['xyz', 'yzx', 'zxy']:
+ result *= -1
+
+ return result.reshape(original_shape)
+
+
+def quaternion_to_matrix(quaternions):
+ """
+ Convert rotations given as quaternions to rotation matrices.
+ Args:
+ quaternions: quaternions with real part first,
+ as tensor of shape (..., 4).
+ Returns:
+ Rotation matrices as tensor of shape (..., 3, 3).
+ """
+ r, i, j, k = torch.unbind(quaternions, -1)
+ two_s = 2.0 / (quaternions * quaternions).sum(-1)
+
+ o = torch.stack(
+ (
+ 1 - two_s * (j * j + k * k),
+ two_s * (i * j - k * r),
+ two_s * (i * k + j * r),
+ two_s * (i * j + k * r),
+ 1 - two_s * (i * i + k * k),
+ two_s * (j * k - i * r),
+ two_s * (i * k - j * r),
+ two_s * (j * k + i * r),
+ 1 - two_s * (i * i + j * j),
+ ),
+ -1,
+ )
+ return o.reshape(quaternions.shape[:-1] + (3, 3))
+
+
+def quaternion_to_matrix_np(quaternions):
+ q = torch.from_numpy(quaternions).contiguous().float()
+ return quaternion_to_matrix(q).numpy()
+
+
+def quaternion_to_cont6d_np(quaternions):
+ rotation_mat = quaternion_to_matrix_np(quaternions)
+ cont_6d = np.concatenate([rotation_mat[..., 0], rotation_mat[..., 1]], axis=-1)
+ return cont_6d
+
+
+def quaternion_to_cont6d(quaternions):
+ rotation_mat = quaternion_to_matrix(quaternions)
+ cont_6d = torch.cat([rotation_mat[..., 0], rotation_mat[..., 1]], dim=-1)
+ return cont_6d
+
+
+def cont6d_to_matrix(cont6d):
+ assert cont6d.shape[-1] == 6, "The last dimension must be 6"
+ x_raw = cont6d[..., 0:3]
+ y_raw = cont6d[..., 3:6]
+
+ x = x_raw / torch.norm(x_raw, dim=-1, keepdim=True)
+ z = torch.cross(x, y_raw, dim=-1)
+ z = z / torch.norm(z, dim=-1, keepdim=True)
+
+ y = torch.cross(z, x, dim=-1)
+
+ x = x[..., None]
+ y = y[..., None]
+ z = z[..., None]
+
+ mat = torch.cat([x, y, z], dim=-1)
+ return mat
+
+
+def cont6d_to_matrix_np(cont6d):
+ q = torch.from_numpy(cont6d).contiguous().float()
+ return cont6d_to_matrix(q).numpy()
+
+
+def qpow(q0, t, dtype=torch.float):
+ ''' q0 : tensor of quaternions
+ t: tensor of powers
+ '''
+ q0 = qnormalize(q0)
+ theta0 = torch.acos(q0[..., 0])
+
+ ## if theta0 is close to zero, add epsilon to avoid NaNs
+ mask = (theta0 <= 10e-10) * (theta0 >= -10e-10)
+ theta0 = (1 - mask) * theta0 + mask * 10e-10
+ v0 = q0[..., 1:] / torch.sin(theta0).view(-1, 1)
+
+ if isinstance(t, torch.Tensor):
+ q = torch.zeros(t.shape + q0.shape)
+ theta = t.view(-1, 1) * theta0.view(1, -1)
+ else: ## if t is a number
+ q = torch.zeros(q0.shape)
+ theta = t * theta0
+
+ q[..., 0] = torch.cos(theta)
+ q[..., 1:] = v0 * torch.sin(theta).unsqueeze(-1)
+
+ return q.to(dtype)
+
+
+def qslerp(q0, q1, t):
+ '''
+ q0: starting quaternion
+ q1: ending quaternion
+ t: array of points along the way
+
+ Returns:
+ Tensor of Slerps: t.shape + q0.shape
+ '''
+
+ q0 = qnormalize(q0)
+ q1 = qnormalize(q1)
+ q_ = qpow(qmul(q1, qinv(q0)), t)
+
+ return qmul(q_,
+ q0.contiguous().view(torch.Size([1] * len(t.shape)) + q0.shape).expand(t.shape + q0.shape).contiguous())
+
+
+def qbetween(v0, v1):
+ '''
+ find the quaternion used to rotate v0 to v1
+ '''
+ assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)'
+ assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)'
+
+ v = torch.cross(v0, v1)
+ w = torch.sqrt((v0 ** 2).sum(dim=-1, keepdim=True) * (v1 ** 2).sum(dim=-1, keepdim=True)) + (v0 * v1).sum(dim=-1,
+ keepdim=True)
+ return qnormalize(torch.cat([w, v], dim=-1))
+
+
+def qbetween_np(v0, v1):
+ '''
+ find the quaternion used to rotate v0 to v1
+ '''
+ assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)'
+ assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)'
+
+ v0 = torch.from_numpy(v0).float()
+ v1 = torch.from_numpy(v1).float()
+ return qbetween(v0, v1).numpy()
+
+
+def lerp(p0, p1, t):
+ if not isinstance(t, torch.Tensor):
+ t = torch.Tensor([t])
+
+ new_shape = t.shape + p0.shape
+ new_view_t = t.shape + torch.Size([1] * len(p0.shape))
+ new_view_p = torch.Size([1] * len(t.shape)) + p0.shape
+ p0 = p0.view(new_view_p).expand(new_shape)
+ p1 = p1.view(new_view_p).expand(new_shape)
+ t = t.view(new_view_t).expand(new_shape)
+
+ return p0 + t * (p1 - p0)
diff --git a/data_loaders/custom/common/skeleton.py b/data_loaders/custom/common/skeleton.py
new file mode 100644
index 0000000..b6f9167
--- /dev/null
+++ b/data_loaders/custom/common/skeleton.py
@@ -0,0 +1,199 @@
+from data_loaders.custom.common.quaternion import *
+import scipy.ndimage.filters as filters
+
+class Skeleton(object):
+ def __init__(self, offset, kinematic_tree, device):
+ self.device = device
+ self._raw_offset_np = offset.numpy()
+ self._raw_offset = offset.clone().detach().to(device).float()
+ self._kinematic_tree = kinematic_tree
+ self._offset = None
+ self._parents = [0] * len(self._raw_offset)
+ self._parents[0] = -1
+ for chain in self._kinematic_tree:
+ for j in range(1, len(chain)):
+ self._parents[chain[j]] = chain[j-1]
+
+ def njoints(self):
+ return len(self._raw_offset)
+
+ def offset(self):
+ return self._offset
+
+ def set_offset(self, offsets):
+ self._offset = offsets.clone().detach().to(self.device).float()
+
+ def kinematic_tree(self):
+ return self._kinematic_tree
+
+ def parents(self):
+ return self._parents
+
+ # joints (batch_size, joints_num, 3)
+ def get_offsets_joints_batch(self, joints):
+ assert len(joints.shape) == 3
+ _offsets = self._raw_offset.expand(joints.shape[0], -1, -1).clone()
+ for i in range(1, self._raw_offset.shape[0]):
+ _offsets[:, i] = torch.norm(joints[:, i] - joints[:, self._parents[i]], p=2, dim=1)[:, None] * _offsets[:, i]
+
+ self._offset = _offsets.detach()
+ return _offsets
+
+ # joints (joints_num, 3)
+ def get_offsets_joints(self, joints):
+ assert len(joints.shape) == 2
+ _offsets = self._raw_offset.clone()
+ for i in range(1, self._raw_offset.shape[0]):
+ # print(joints.shape)
+ _offsets[i] = torch.norm(joints[i] - joints[self._parents[i]], p=2, dim=0) * _offsets[i]
+
+ self._offset = _offsets.detach()
+ return _offsets
+
+ # face_joint_idx should follow the order of right hip, left hip, right shoulder, left shoulder
+ # joints (batch_size, joints_num, 3)
+ def inverse_kinematics_np(self, joints, face_joint_idx, smooth_forward=False):
+ assert len(face_joint_idx) == 4
+ '''Get Forward Direction'''
+ l_hip, r_hip, sdr_r, sdr_l = face_joint_idx
+ across1 = joints[:, r_hip] - joints[:, l_hip]
+ across2 = joints[:, sdr_r] - joints[:, sdr_l]
+ across = across1 + across2
+ across = across / np.sqrt((across**2).sum(axis=-1))[:, np.newaxis]
+ # print(across1.shape, across2.shape)
+
+ # forward (batch_size, 3)
+ forward = np.cross(np.array([[0, 1, 0]]), across, axis=-1)
+ if smooth_forward:
+ forward = filters.gaussian_filter1d(forward, 20, axis=0, mode='nearest')
+ # forward (batch_size, 3)
+ forward = forward / np.sqrt((forward**2).sum(axis=-1))[..., np.newaxis]
+
+ '''Get Root Rotation'''
+ target = np.array([[0,0,1]]).repeat(len(forward), axis=0)
+ root_quat = qbetween_np(forward, target)
+
+ '''Inverse Kinematics'''
+ # quat_params (batch_size, joints_num, 4)
+ # print(joints.shape[:-1])
+ quat_params = np.zeros(joints.shape[:-1] + (4,))
+ # print(quat_params.shape)
+ root_quat[0] = np.array([[1.0, 0.0, 0.0, 0.0]])
+ quat_params[:, 0] = root_quat
+ # quat_params[0, 0] = np.array([[1.0, 0.0, 0.0, 0.0]])
+ for chain in self._kinematic_tree:
+ R = root_quat
+ for j in range(len(chain) - 1):
+ # (batch, 3)
+ u = self._raw_offset_np[chain[j+1]][np.newaxis,...].repeat(len(joints), axis=0)
+ # print(u.shape)
+ # (batch, 3)
+ v = joints[:, chain[j+1]] - joints[:, chain[j]]
+ v = v / np.sqrt((v**2).sum(axis=-1))[:, np.newaxis]
+ # print(u.shape, v.shape)
+ rot_u_v = qbetween_np(u, v)
+
+ R_loc = qmul_np(qinv_np(R), rot_u_v)
+
+ quat_params[:,chain[j + 1], :] = R_loc
+ R = qmul_np(R, R_loc)
+
+ return quat_params
+
+ # Be sure root joint is at the beginning of kinematic chains
+ def forward_kinematics(self, quat_params, root_pos, skel_joints=None, do_root_R=True):
+ # quat_params (batch_size, joints_num, 4)
+ # joints (batch_size, joints_num, 3)
+ # root_pos (batch_size, 3)
+ if skel_joints is not None:
+ offsets = self.get_offsets_joints_batch(skel_joints)
+ if len(self._offset.shape) == 2:
+ offsets = self._offset.expand(quat_params.shape[0], -1, -1)
+ joints = torch.zeros(quat_params.shape[:-1] + (3,)).to(self.device)
+ joints[:, 0] = root_pos
+ for chain in self._kinematic_tree:
+ if do_root_R:
+ R = quat_params[:, 0]
+ else:
+ R = torch.tensor([[1.0, 0.0, 0.0, 0.0]]).expand(len(quat_params), -1).detach().to(self.device)
+ for i in range(1, len(chain)):
+ R = qmul(R, quat_params[:, chain[i]])
+ offset_vec = offsets[:, chain[i]]
+ joints[:, chain[i]] = qrot(R, offset_vec) + joints[:, chain[i-1]]
+ return joints
+
+ # Be sure root joint is at the beginning of kinematic chains
+ def forward_kinematics_np(self, quat_params, root_pos, skel_joints=None, do_root_R=True):
+ # quat_params (batch_size, joints_num, 4)
+ # joints (batch_size, joints_num, 3)
+ # root_pos (batch_size, 3)
+ if skel_joints is not None:
+ skel_joints = torch.from_numpy(skel_joints)
+ offsets = self.get_offsets_joints_batch(skel_joints)
+ if len(self._offset.shape) == 2:
+ offsets = self._offset.expand(quat_params.shape[0], -1, -1)
+ offsets = offsets.numpy()
+ joints = np.zeros(quat_params.shape[:-1] + (3,))
+ joints[:, 0] = root_pos
+ for chain in self._kinematic_tree:
+ if do_root_R:
+ R = quat_params[:, 0]
+ else:
+ R = np.array([[1.0, 0.0, 0.0, 0.0]]).repeat(len(quat_params), axis=0)
+ for i in range(1, len(chain)):
+ R = qmul_np(R, quat_params[:, chain[i]])
+ offset_vec = offsets[:, chain[i]]
+ joints[:, chain[i]] = qrot_np(R, offset_vec) + joints[:, chain[i - 1]]
+ return joints
+
+ def forward_kinematics_cont6d_np(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True):
+ # cont6d_params (batch_size, joints_num, 6)
+ # joints (batch_size, joints_num, 3)
+ # root_pos (batch_size, 3)
+ if skel_joints is not None:
+ skel_joints = torch.from_numpy(skel_joints)
+ offsets = self.get_offsets_joints_batch(skel_joints)
+ if len(self._offset.shape) == 2:
+ offsets = self._offset.expand(cont6d_params.shape[0], -1, -1)
+ offsets = offsets.numpy()
+ joints = np.zeros(cont6d_params.shape[:-1] + (3,))
+ joints[:, 0] = root_pos
+ for chain in self._kinematic_tree:
+ if do_root_R:
+ matR = cont6d_to_matrix_np(cont6d_params[:, 0])
+ else:
+ matR = np.eye(3)[np.newaxis, :].repeat(len(cont6d_params), axis=0)
+ for i in range(1, len(chain)):
+ matR = np.matmul(matR, cont6d_to_matrix_np(cont6d_params[:, chain[i]]))
+ offset_vec = offsets[:, chain[i]][..., np.newaxis]
+ # print(matR.shape, offset_vec.shape)
+ joints[:, chain[i]] = np.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]]
+ return joints
+
+ def forward_kinematics_cont6d(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True):
+ # cont6d_params (batch_size, joints_num, 6)
+ # joints (batch_size, joints_num, 3)
+ # root_pos (batch_size, 3)
+ if skel_joints is not None:
+ # skel_joints = torch.from_numpy(skel_joints)
+ offsets = self.get_offsets_joints_batch(skel_joints)
+ if len(self._offset.shape) == 2:
+ offsets = self._offset.expand(cont6d_params.shape[0], -1, -1)
+ joints = torch.zeros(cont6d_params.shape[:-1] + (3,)).to(cont6d_params.device)
+ joints[..., 0, :] = root_pos
+ for chain in self._kinematic_tree:
+ if do_root_R:
+ matR = cont6d_to_matrix(cont6d_params[:, 0])
+ else:
+ matR = torch.eye(3).expand((len(cont6d_params), -1, -1)).detach().to(cont6d_params.device)
+ for i in range(1, len(chain)):
+ matR = torch.matmul(matR, cont6d_to_matrix(cont6d_params[:, chain[i]]))
+ offset_vec = offsets[:, chain[i]].unsqueeze(-1)
+ # print(matR.shape, offset_vec.shape)
+ joints[:, chain[i]] = torch.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]]
+ return joints
+
+
+
+
+
diff --git a/data_loaders/custom/data/__init__.py b/data_loaders/custom/data/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/data_loaders/custom/data/dataset.py b/data_loaders/custom/data/dataset.py
new file mode 100644
index 0000000..57063cf
--- /dev/null
+++ b/data_loaders/custom/data/dataset.py
@@ -0,0 +1,1372 @@
+import torch
+from torch.utils import data
+import numpy as np
+import os
+from os.path import join as pjoin
+import random
+import codecs as cs
+from tqdm import tqdm
+import spacy
+
+from torch.utils.data._utils.collate import default_collate
+from data_loaders.custom.utils.word_vectorizer import WordVectorizer
+from data_loaders.custom.utils.get_opt import get_opt
+from data_loaders.custom.common.quaternion import qinv, qrot
+from data_loaders.custom.scripts.motion_process import recover_from_ric, extract_features
+from data_loaders.custom.utils.paramUtil import *
+from data_loaders.custom.common.skeleton import Skeleton
+
+
+def collate_fn(batch):
+ batch.sort(key=lambda x: x[3], reverse=True)
+ return default_collate(batch)
+
+
+'''For use of training text-2-motion generative model'''
+class Text2MotionDataset(data.Dataset):
+ def __init__(self, opt, mean, std, split_file, w_vectorizer):
+ self.opt = opt
+ self.w_vectorizer = w_vectorizer
+ self.max_length = 20
+ self.pointer = 0
+ min_motion_len = 40 if self.opt.dataset_name == 't2m' else 24
+
+ joints_num = opt.joints_num
+
+ data_dict = {}
+ id_list = []
+ with cs.open(split_file, 'r') as f:
+ for line in f.readlines():
+ id_list.append(line.strip())
+
+ new_name_list = []
+ length_list = []
+ for name in tqdm(id_list):
+ try:
+ motion = np.load(pjoin(opt.motion_dir, name + '.npy'))
+ if (len(motion)) < min_motion_len or (len(motion) >= 200):
+ continue
+ text_data = []
+ flag = False
+ with cs.open(pjoin(opt.text_dir, name + '.txt')) as f:
+ for line in f.readlines():
+ text_dict = {}
+ line_split = line.strip().split('#')
+ caption = line_split[0]
+ tokens = line_split[1].split(' ')
+ f_tag = float(line_split[2])
+ to_tag = float(line_split[3])
+ f_tag = 0.0 if np.isnan(f_tag) else f_tag
+ to_tag = 0.0 if np.isnan(to_tag) else to_tag
+
+ text_dict['caption'] = caption
+ text_dict['tokens'] = tokens
+ if f_tag == 0.0 and to_tag == 0.0:
+ flag = True
+ text_data.append(text_dict)
+ else:
+ try:
+ n_motion = motion[int(f_tag * 20):int(to_tag *
+ 20)]
+ if (len(n_motion)) < min_motion_len or (
+ len(n_motion) >= 200):
+ continue
+ new_name = random.choice(
+ 'ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
+ while new_name in data_dict:
+ new_name = random.choice(
+ 'ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
+ data_dict[new_name] = {
+ 'motion': n_motion,
+ 'length': len(n_motion),
+ 'text': [text_dict]
+ }
+ new_name_list.append(new_name)
+ length_list.append(len(n_motion))
+ except:
+ print(line_split)
+ print(line_split[2], line_split[3], f_tag,
+ to_tag, name)
+ # break
+
+ if flag:
+ data_dict[name] = {
+ 'motion': motion,
+ 'length': len(motion),
+ 'text': text_data
+ }
+ new_name_list.append(name)
+ length_list.append(len(motion))
+ except:
+ # Some motion may not exist in KIT dataset
+ pass
+
+ name_list, length_list = zip(
+ *sorted(zip(new_name_list, length_list), key=lambda x: x[1]))
+
+ if opt.is_train:
+ # root_rot_velocity (B, seq_len, 1)
+ std[0:1] = std[0:1] / opt.feat_bias
+ # root_linear_velocity (B, seq_len, 2)
+ std[1:3] = std[1:3] / opt.feat_bias
+ # root_y (B, seq_len, 1)
+ std[3:4] = std[3:4] / opt.feat_bias
+ # ric_data (B, seq_len, (joints_num - 1)*3)
+ std[4:4 + (joints_num - 1) * 3] = std[4:4 +
+ (joints_num - 1) * 3] / 1.0
+ # rot_data (B, seq_len, (joints_num - 1)*6)
+ std[4 + (joints_num - 1) * 3:4 +
+ (joints_num - 1) * 9] = std[4 + (joints_num - 1) * 3:4 +
+ (joints_num - 1) * 9] / 1.0
+ # local_velocity (B, seq_len, joints_num*3)
+ std[4 + (joints_num - 1) * 9:4 + (joints_num - 1) * 9 +
+ joints_num *
+ 3] = std[4 + (joints_num - 1) * 9:4 +
+ (joints_num - 1) * 9 + joints_num * 3] / 1.0
+ # foot contact (B, seq_len, 4)
+ std[4 + (joints_num - 1) * 9 +
+ joints_num * 3:] = std[4 + (joints_num - 1) * 9 +
+ joints_num * 3:] / opt.feat_bias
+
+ assert 4 + (joints_num -
+ 1) * 9 + joints_num * 3 + 4 == mean.shape[-1]
+ np.save(pjoin(opt.meta_dir, 'mean.npy'), mean)
+ np.save(pjoin(opt.meta_dir, 'std.npy'), std)
+
+ self.mean = mean
+ self.std = std
+ self.length_arr = np.array(length_list)
+ self.data_dict = data_dict
+ self.name_list = name_list
+ self.reset_max_len(self.max_length)
+
+ def reset_max_len(self, length):
+ assert length <= self.opt.max_motion_length
+ self.pointer = np.searchsorted(self.length_arr, length)
+ print("Pointer Pointing at %d" % self.pointer)
+ self.max_length = length
+
+ def inv_transform(self, data):
+ return data * self.std + self.mean
+
+ def __len__(self):
+ return len(self.data_dict) - self.pointer
+
+ def __getitem__(self, item):
+ idx = self.pointer + item
+ data = self.data_dict[self.name_list[idx]]
+ motion, m_length, text_list = data['motion'], data['length'], data[
+ 'text']
+ # Randomly select a caption
+ text_data = random.choice(text_list)
+ caption, tokens = text_data['caption'], text_data['tokens']
+
+ if len(tokens) < self.opt.max_text_len:
+ # pad with "unk"
+ tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
+ sent_len = len(tokens)
+ tokens = tokens + ['unk/OTHER'
+ ] * (self.opt.max_text_len + 2 - sent_len)
+ else:
+ # crop
+ tokens = tokens[:self.opt.max_text_len]
+ tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
+ sent_len = len(tokens)
+ pos_one_hots = []
+ word_embeddings = []
+ for token in tokens:
+ word_emb, pos_oh = self.w_vectorizer[token]
+ pos_one_hots.append(pos_oh[None, :])
+ word_embeddings.append(word_emb[None, :])
+ pos_one_hots = np.concatenate(pos_one_hots, axis=0)
+ word_embeddings = np.concatenate(word_embeddings, axis=0)
+
+ len_gap = (m_length - self.max_length) // self.opt.unit_length
+
+ if self.opt.is_train:
+ if m_length != self.max_length:
+ # print("Motion original length:%d_%d"%(m_length, len(motion)))
+ if self.opt.unit_length < 10:
+ coin2 = np.random.choice(['single', 'single', 'double'])
+ else:
+ coin2 = 'single'
+ if len_gap == 0 or (len_gap == 1 and coin2 == 'double'):
+ m_length = self.max_length
+ idx = random.randint(0, m_length - self.max_length)
+ motion = motion[idx:idx + self.max_length]
+ else:
+ if coin2 == 'single':
+ n_m_length = self.max_length + self.opt.unit_length * len_gap
+ else:
+ n_m_length = self.max_length + self.opt.unit_length * (
+ len_gap - 1)
+ idx = random.randint(0, m_length - n_m_length)
+ motion = motion[idx:idx + self.max_length]
+ m_length = n_m_length
+ # print(len_gap, idx, coin2)
+ else:
+ if self.opt.unit_length < 10:
+ coin2 = np.random.choice(['single', 'single', 'double'])
+ else:
+ coin2 = 'single'
+
+ if coin2 == 'double':
+ m_length = (m_length // self.opt.unit_length -
+ 1) * self.opt.unit_length
+ elif coin2 == 'single':
+ m_length = (m_length //
+ self.opt.unit_length) * self.opt.unit_length
+ idx = random.randint(0, len(motion) - m_length)
+ motion = motion[idx:idx + m_length]
+
+ "Z Normalization"
+ motion = (motion - self.mean) / self.std
+
+ return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length
+
+
+'''For use of training text motion matching model, and evaluations'''
+
+
+class Text2MotionDatasetV2(data.Dataset):
+ """
+ Args:
+        std_multiplier: multiply the std by this value; maybe useful for diffusion models by keeping the range of data manageable
+ """
+ def __init__(self,
+ opt,
+ mean,
+ std,
+ split_file,
+ w_vectorizer,
+ use_rand_proj=False,
+ proj_matrix_dir=None,
+ traject_only=False,
+ mode='train',
+ random_proj_scale=10.0,
+ augment_type='none',
+ std_scale_shift=(1., 0.), # Test random projection
+ drop_redundant=False):
+ self.opt = opt
+ self.w_vectorizer = w_vectorizer
+ self.max_length = 20
+ self.pointer = 0
+ self.max_motion_length = opt.max_motion_length
+ min_motion_len = 5 if self.opt.dataset_name == 't2m' else 24
+
+ self.use_rand_proj = use_rand_proj
+ self.traject_only = traject_only
+ self.mode = mode
+
+ self.augment_type = augment_type
+ assert self.augment_type in ['none', 'rot', 'full']
+
+ self.std_scale_shift = std_scale_shift
+ self.drop_redundant = drop_redundant
+
+ self.joints_num = opt.joints_num
+
+ data_dict = {}
+ id_list = []
+ with cs.open(split_file, 'r') as f:
+ for line in f.readlines():
+ id_list.append(line.strip())
+
+ # NOTE: Small data for debugging
+ # print(' --- Using small data for debugging ---')
+ # id_list = id_list[:200]
+
+ new_name_list = []
+ length_list = []
+ for name in tqdm(id_list):
+ try:
+ # if True:
+ motion = np.load(pjoin(opt.motion_dir, name + '.npy'))
+ if (len(motion)) < min_motion_len or (len(motion) >= 200):
+ continue
+ text_data = []
+ flag = False
+ with cs.open(pjoin(opt.text_dir, name + '.txt')) as f:
+ for line in f.readlines():
+ text_dict = {}
+ line_split = line.strip().split('#')
+ caption = line_split[0]
+ tokens = line_split[1].split(' ')
+ f_tag = float(line_split[2])
+ to_tag = float(line_split[3])
+ f_tag = 0.0 if np.isnan(f_tag) else f_tag
+ to_tag = 0.0 if np.isnan(to_tag) else to_tag
+
+ text_dict['caption'] = caption
+ text_dict['tokens'] = tokens
+ if f_tag == 0.0 and to_tag == 0.0:
+ flag = True
+ text_data.append(text_dict)
+ else:
+ try:
+ n_motion = motion[int(f_tag * 20):int(to_tag *
+ 20)]
+ if (len(n_motion)) < min_motion_len or (
+ len(n_motion) >= 200):
+ continue
+ new_name = random.choice(
+ 'ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
+ while new_name in data_dict:
+ new_name = random.choice(
+ 'ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
+ data_dict[new_name] = {
+ 'motion': n_motion,
+ 'length': len(n_motion),
+ 'text': [text_dict]
+ }
+ new_name_list.append(new_name)
+ length_list.append(len(n_motion))
+ except:
+ print(line_split)
+ print(line_split[2], line_split[3], f_tag,
+ to_tag, name)
+
+ if flag:
+ motion = motion[:opt.max_motion_length]
+ data_dict[name] = {
+ 'motion': motion,
+ 'length': len(motion),
+ 'text': text_data
+ }
+ new_name_list.append(name)
+ length_list.append(len(motion))
+ except Exception as x:
+ print(f"====#{type(x)}: {x.args}#====")
+
+ name_list, length_list = zip(
+ *sorted(zip(new_name_list, length_list), key=lambda x: x[1]))
+
+ self.mean = mean
+ self.std = std
+
+ self.length_arr = np.array(length_list)
+ self.data_dict = data_dict
+ self.name_list = name_list
+ self.reset_max_len(self.max_length)
+
+ if use_rand_proj:
+ self.init_random_projection(proj_matrix_dir,
+ scale=random_proj_scale)
+
+ def reset_max_len(self, length):
+ assert length <= self.max_motion_length
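+        # length_arr is sorted ascending, so the pointer is the first index whose motion
+        # length >= `length`; __len__ and __getitem__ offset by it to skip shorter clips.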
+ self.pointer = np.searchsorted(self.length_arr, length)
+ print("Pointer Pointing at %d" % self.pointer)
+ self.max_length = length
+
+ def get_std_mean(self, traject_only=None, drop_redundant=None):
+ if traject_only is None:
+ traject_only = self.traject_only
+ if drop_redundant is None:
+ drop_redundant = self.drop_redundant
+
+ joints_num = self.joints_num
+
+ if traject_only:
+ std = self.std[:4]
+ mean = self.mean[:4]
+ elif drop_redundant:
+ std = self.std[:(4 + (joints_num - 1) * 3)]
+ mean = self.mean[:(4 + (joints_num - 1) * 3)]
+ else:
+ std = self.std
+ mean = self.mean
+ std = std * self.std_scale_shift[0] + self.std_scale_shift[1]
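+        # std_scale_shift = (scale, shift) is an optional affine tweak of the std used for
+        # normalization; the default (1., 0.) leaves the statistics unchanged.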
+ return std, mean
+
+ def inv_transform(self, data, traject_only=None):
+ if self.use_rand_proj:
+ data = self.inv_random_projection(data)
+ std, mean = self.get_std_mean(traject_only)
+ return data * std + mean
+
+ def inv_transform_th(self, data, traject_only=None, use_rand_proj=None):
+ use_rand_proj = self.use_rand_proj if use_rand_proj is None else use_rand_proj
+ if use_rand_proj:
+ data = self.inv_random_projection(data, mode="th")
+ std, mean = self.get_std_mean(traject_only)
+ return data * torch.from_numpy(std).to(
+ data.device) + torch.from_numpy(mean).to(data.device)
+
+ def transform_th(self, data, traject_only=None, use_rand_proj=None):
+ std, mean = self.get_std_mean(traject_only)
+ data = (data - torch.from_numpy(mean).to(
+ data.device)) / torch.from_numpy(std).to(data.device)
+ use_rand_proj = self.use_rand_proj if use_rand_proj is None else use_rand_proj
+ if use_rand_proj:
+ data = self.random_projection(data, mode="th")
+ return data
+
+ def __len__(self):
+ return len(self.data_dict) - self.pointer
+
+ def __getitem__(self, item):
+ idx = self.pointer + item
+ data = self.data_dict[self.name_list[idx]]
+ motion, m_length, text_list = data['motion'], data['length'], data[
+ 'text']
+ # Randomly select a caption
+ text_data = random.choice(text_list)
+ # text_data = text_list[0] # for rebuttal experiments
+ caption, tokens = text_data['caption'], text_data['tokens']
+
+ joints_num = self.joints_num
+
+ if len(tokens) < self.opt.max_text_len:
+ # pad with "unk"
+ tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
+ sent_len = len(tokens)
+ tokens = tokens + ['unk/OTHER'
+ ] * (self.opt.max_text_len + 2 - sent_len)
+ else:
+ # crop
+ tokens = tokens[:self.opt.max_text_len]
+ tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
+ sent_len = len(tokens)
+ pos_one_hots = []
+ word_embeddings = []
+ for token in tokens:
+ word_emb, pos_oh = self.w_vectorizer[token]
+ pos_one_hots.append(pos_oh[None, :])
+ word_embeddings.append(word_emb[None, :])
+ pos_one_hots = np.concatenate(pos_one_hots, axis=0)
+ word_embeddings = np.concatenate(word_embeddings, axis=0)
+
+        # Crop the motion length to a multiple of unit_length and introduce small variations
+ if self.opt.unit_length < 10:
+ coin2 = np.random.choice(['single', 'single', 'double'])
+ else:
+ coin2 = 'single'
+
+ if coin2 == 'double':
+ m_length = (m_length // self.opt.unit_length -
+ 1) * self.opt.unit_length
+ elif coin2 == 'single':
+ m_length = (m_length //
+ self.opt.unit_length) * self.opt.unit_length
+ idx = random.randint(0, len(motion) - m_length)
+ motion = motion[idx:idx + m_length]
+
+ # NOTE: if used for training trajectory model, discard all but the first 4 values
+ if self.traject_only:
+ motion = motion[:, :4]
+
+ if self.augment_type in ['full', 'rot']:
+ # motion [length, 4 or 263]
+ # Random rotation
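+            # A random yaw angle in [-pi/4, pi/4) is added to the root rotation channel
+            # (index 0), and the root (x, z) position (indices 1, 2) is rotated consistently
+            # about the vertical axis using the corresponding quaternion.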
+ rand_rot = (torch.rand(1, 1) * 2.0 -
+ 1.0) * np.pi / 4. # Rand [-1,1)
+ r_rot_quat = torch.zeros(1, 4)
+ r_rot_quat[..., 0] = torch.cos(rand_rot)
+ r_rot_quat[..., 2] = torch.sin(rand_rot)
+ r_rot_quat = r_rot_quat.repeat(motion.shape[:-1] + (1, ))
+ motion[:, 0:1] = motion[:, 0:1] + rand_rot.numpy()
+
+ pos = torch.zeros(motion.shape[:-1] + (3, ))
+ pos[..., [0, 2]] = torch.from_numpy(motion[..., 1:3])
+ pos = qrot(qinv(r_rot_quat), pos)
+ motion[:, [1, 2]] = pos[:, [0, 2]].numpy()
+
+ # Random translation. Only care about (x,z)
+ if self.augment_type == 'full':
+ trans_size = 3.
+ rand_trans = np.random.rand(1, 2) * 2.0 - 1.0 # Rand [-1,1)
+ rand_trans = rand_trans * trans_size
+ motion[:, [1, 2]] = motion[:, [1, 2]] + rand_trans
+
+ if self.drop_redundant:
+ # Only keep the first 4 values and 21 joint locations
+ assert not self.use_rand_proj
+ motion = motion[:, :(4 + (joints_num - 1) * 3)]
+
+ "Z Normalization"
+ std, mean = self.get_std_mean()
+ motion = (motion - mean) / std
+
+ # Projection
+ # NOTE: Do not do random projection if mode is eval or gt
+ if (not self.mode in ["eval", "gt"]) and self.use_rand_proj:
+ # t x 263
+ motion = self.random_projection(motion)
+
+ if m_length < self.max_motion_length:
+ motion = np.concatenate([
+ motion,
+ np.zeros((self.max_motion_length - m_length, motion.shape[1]))
+ ],
+ axis=0)
+
+ # print(word_embeddings.shape, motion.shape)
+ # print(tokens)
+ return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, '_'.join(
+ tokens)
+
+ def init_random_projection(self, save_at, scale: float):
+ if os.path.isfile(os.path.join(save_at, "rand_proj.npy")):
+ print(f"Loading random projection matrix from {save_at}")
+ self.proj_matrix = np.load(os.path.join(save_at, "rand_proj.npy"))
+ self.inv_proj_matrix = np.load(
+ os.path.join(save_at, "inv_rand_proj.npy"))
+ else:
+ print(f"Creating random projection matrix {scale}")
+ self.proj_matrix = torch.normal(
+ mean=0, std=1.0, size=(263, 263),
+ dtype=torch.float) # / np.sqrt(263)
+
+ # scale first three values (rot spd, x spd, z spd)
+ self.proj_matrix[[0, 1, 2], :] *= scale
+ self.proj_matrix = self.proj_matrix / np.sqrt(263 - 3 +
+ 3 * scale**2)
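+            # Each column now holds 260 N(0, 1) entries plus 3 entries scaled by `scale`,
+            # so its expected squared norm is 263 - 3 + 3 * scale**2; dividing by the square
+            # root keeps each projected dimension at roughly unit variance.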
+ self.inv_proj_matrix = torch.inverse(self.proj_matrix)
+
+ self.proj_matrix = self.proj_matrix.detach().cpu().numpy()
+ self.inv_proj_matrix = self.inv_proj_matrix.detach().cpu().numpy()
+
+ self.proj_matrix_th = torch.from_numpy(self.proj_matrix)
+ self.inv_proj_matrix_th = torch.from_numpy(self.inv_proj_matrix)
+
+ np.save(os.path.join(save_at, "rand_proj.npy"), self.proj_matrix)
+ np.save(os.path.join(save_at, "inv_rand_proj.npy"),
+ self.inv_proj_matrix)
+
+ def random_projection(self, motion, mode="np"):
+ if mode == "th":
+ return torch.matmul(motion, self.proj_matrix_th.to(motion.device))
+ return np.matmul(motion, self.proj_matrix)
+
+ def inv_random_projection(self, data, mode="np"):
+ if mode == "th":
+ return torch.matmul(data, self.inv_proj_matrix_th.to(data.device))
+ return np.matmul(data, self.inv_proj_matrix)
+
+
+'''For use of training baseline'''
+
+
+class Text2MotionDatasetBaseline(data.Dataset):
+ def __init__(self, opt, mean, std, split_file, w_vectorizer):
+ self.opt = opt
+ self.w_vectorizer = w_vectorizer
+ self.max_length = 20
+ self.pointer = 0
+ self.max_motion_length = opt.max_motion_length
+ min_motion_len = 40 if self.opt.dataset_name == 't2m' else 24
+
+ data_dict = {}
+ id_list = []
+ with cs.open(split_file, 'r') as f:
+ for line in f.readlines():
+ id_list.append(line.strip())
+ # id_list = id_list[:200]
+
+ new_name_list = []
+ length_list = []
+ for name in tqdm(id_list):
+ try:
+ motion = np.load(pjoin(opt.motion_dir, name + '.npy'))
+ if (len(motion)) < min_motion_len or (len(motion) >= 200):
+ continue
+ text_data = []
+ flag = False
+ with cs.open(pjoin(opt.text_dir, name + '.txt')) as f:
+ for line in f.readlines():
+ text_dict = {}
+ line_split = line.strip().split('#')
+ caption = line_split[0]
+ tokens = line_split[1].split(' ')
+ f_tag = float(line_split[2])
+ to_tag = float(line_split[3])
+ f_tag = 0.0 if np.isnan(f_tag) else f_tag
+ to_tag = 0.0 if np.isnan(to_tag) else to_tag
+
+ text_dict['caption'] = caption
+ text_dict['tokens'] = tokens
+ if f_tag == 0.0 and to_tag == 0.0:
+ flag = True
+ text_data.append(text_dict)
+ else:
+ try:
+ n_motion = motion[int(f_tag * 20):int(to_tag *
+ 20)]
+ if (len(n_motion)) < min_motion_len or (
+ len(n_motion) >= 200):
+ continue
+ new_name = random.choice(
+ 'ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
+ while new_name in data_dict:
+ new_name = random.choice(
+ 'ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
+ data_dict[new_name] = {
+ 'motion': n_motion,
+ 'length': len(n_motion),
+ 'text': [text_dict]
+ }
+ new_name_list.append(new_name)
+ length_list.append(len(n_motion))
+ except:
+ print(line_split)
+ print(line_split[2], line_split[3], f_tag,
+ to_tag, name)
+ # break
+
+ if flag:
+ data_dict[name] = {
+ 'motion': motion,
+ 'length': len(motion),
+ 'text': text_data
+ }
+ new_name_list.append(name)
+ length_list.append(len(motion))
+ except:
+ pass
+
+ name_list, length_list = zip(
+ *sorted(zip(new_name_list, length_list), key=lambda x: x[1]))
+
+ self.mean = mean
+ self.std = std
+ self.length_arr = np.array(length_list)
+ self.data_dict = data_dict
+ self.name_list = name_list
+ self.reset_max_len(self.max_length)
+
+ def reset_max_len(self, length):
+ assert length <= self.max_motion_length
+ self.pointer = np.searchsorted(self.length_arr, length)
+ print("Pointer Pointing at %d" % self.pointer)
+ self.max_length = length
+
+ def inv_transform(self, data):
+ return data * self.std + self.mean
+
+ def __len__(self):
+ return len(self.data_dict) - self.pointer
+
+ def __getitem__(self, item):
+ idx = self.pointer + item
+ data = self.data_dict[self.name_list[idx]]
+ motion, m_length, text_list = data['motion'], data['length'], data[
+ 'text']
+ # Randomly select a caption
+ text_data = random.choice(text_list)
+ caption, tokens = text_data['caption'], text_data['tokens']
+
+ if len(tokens) < self.opt.max_text_len:
+ # pad with "unk"
+ tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
+ sent_len = len(tokens)
+ tokens = tokens + ['unk/OTHER'
+ ] * (self.opt.max_text_len + 2 - sent_len)
+ else:
+ # crop
+ tokens = tokens[:self.opt.max_text_len]
+ tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
+ sent_len = len(tokens)
+ pos_one_hots = []
+ word_embeddings = []
+ for token in tokens:
+ word_emb, pos_oh = self.w_vectorizer[token]
+ pos_one_hots.append(pos_oh[None, :])
+ word_embeddings.append(word_emb[None, :])
+ pos_one_hots = np.concatenate(pos_one_hots, axis=0)
+ word_embeddings = np.concatenate(word_embeddings, axis=0)
+
+ len_gap = (m_length - self.max_length) // self.opt.unit_length
+
+ if m_length != self.max_length:
+ # print("Motion original length:%d_%d"%(m_length, len(motion)))
+ if self.opt.unit_length < 10:
+ coin2 = np.random.choice(['single', 'single', 'double'])
+ else:
+ coin2 = 'single'
+ if len_gap == 0 or (len_gap == 1 and coin2 == 'double'):
+ m_length = self.max_length
+ s_idx = random.randint(0, m_length - self.max_length)
+ else:
+ if coin2 == 'single':
+ n_m_length = self.max_length + self.opt.unit_length * len_gap
+ else:
+ n_m_length = self.max_length + self.opt.unit_length * (
+ len_gap - 1)
+ s_idx = random.randint(0, m_length - n_m_length)
+ m_length = n_m_length
+ else:
+ s_idx = 0
+
+ src_motion = motion[s_idx:s_idx + m_length]
+ tgt_motion = motion[s_idx:s_idx + self.max_length]
+
+ "Z Normalization"
+ src_motion = (src_motion - self.mean) / self.std
+ tgt_motion = (tgt_motion - self.mean) / self.std
+
+ if m_length < self.max_motion_length:
+ src_motion = np.concatenate([
+ src_motion,
+ np.zeros((self.max_motion_length - m_length, motion.shape[1]))
+ ],
+ axis=0)
+ # print(m_length, src_motion.shape, tgt_motion.shape)
+ # print(word_embeddings.shape, motion.shape)
+ # print(tokens)
+ return word_embeddings, caption, sent_len, src_motion, tgt_motion, m_length
+
+
+class MotionDatasetV2(data.Dataset):
+ def __init__(self, opt, mean, std, split_file):
+ self.opt = opt
+ joints_num = opt.joints_num
+
+ self.data = []
+ self.lengths = []
+ id_list = []
+ with cs.open(split_file, 'r') as f:
+ for line in f.readlines():
+ id_list.append(line.strip())
+
+ for name in tqdm(id_list):
+ try:
+ motion = np.load(pjoin(opt.motion_dir, name + '.npy'))
+ if motion.shape[0] < opt.window_size:
+ continue
+ self.lengths.append(motion.shape[0] - opt.window_size)
+ self.data.append(motion)
+ except:
+ # Some motion may not exist in KIT dataset
+ pass
+
+ self.cumsum = np.cumsum([0] + self.lengths)
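+        # Running total of per-motion window offsets; __getitem__ maps a flat index back to
+        # a (motion, start offset) pair with np.searchsorted on this array.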
+
+ if opt.is_train:
+ # root_rot_velocity (B, seq_len, 1)
+ std[0:1] = std[0:1] / opt.feat_bias
+ # root_linear_velocity (B, seq_len, 2)
+ std[1:3] = std[1:3] / opt.feat_bias
+ # root_y (B, seq_len, 1)
+ std[3:4] = std[3:4] / opt.feat_bias
+ # ric_data (B, seq_len, (joints_num - 1)*3)
+ std[4:4 + (joints_num - 1) * 3] = std[4:4 +
+ (joints_num - 1) * 3] / 1.0
+ # rot_data (B, seq_len, (joints_num - 1)*6)
+ std[4 + (joints_num - 1) * 3:4 +
+ (joints_num - 1) * 9] = std[4 + (joints_num - 1) * 3:4 +
+ (joints_num - 1) * 9] / 1.0
+ # local_velocity (B, seq_len, joints_num*3)
+ std[4 + (joints_num - 1) * 9:4 + (joints_num - 1) * 9 +
+ joints_num *
+ 3] = std[4 + (joints_num - 1) * 9:4 +
+ (joints_num - 1) * 9 + joints_num * 3] / 1.0
+ # foot contact (B, seq_len, 4)
+ std[4 + (joints_num - 1) * 9 +
+ joints_num * 3:] = std[4 + (joints_num - 1) * 9 +
+ joints_num * 3:] / opt.feat_bias
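+            # Dividing the std by feat_bias shrinks it, so the root channels and the foot
+            # contact bits come out amplified after Z-normalization relative to the other
+            # features (whose std is left unchanged).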
+
+ assert 4 + (joints_num -
+ 1) * 9 + joints_num * 3 + 4 == mean.shape[-1]
+ np.save(pjoin(opt.meta_dir, 'mean.npy'), mean)
+ np.save(pjoin(opt.meta_dir, 'std.npy'), std)
+
+ self.mean = mean
+ self.std = std
+ print("Total number of motions {}, snippets {}".format(
+ len(self.data), self.cumsum[-1]))
+
+ def inv_transform(self, data):
+ return data * self.std + self.mean
+
+ def __len__(self):
+ return self.cumsum[-1]
+
+ def __getitem__(self, item):
+ if item != 0:
+ motion_id = np.searchsorted(self.cumsum, item) - 1
+ idx = item - self.cumsum[motion_id] - 1
+ else:
+ motion_id = 0
+ idx = 0
+ motion = self.data[motion_id][idx:idx + self.opt.window_size]
+ "Z Normalization"
+ motion = (motion - self.mean) / self.std
+
+ return motion
+
+
+class RawTextDataset(data.Dataset):
+ def __init__(self, opt, mean, std, text_file, w_vectorizer):
+ self.mean = mean
+ self.std = std
+ self.opt = opt
+ self.data_dict = []
+ self.nlp = spacy.load('en_core_web_sm')
+
+ with cs.open(text_file) as f:
+ for line in f.readlines():
+ word_list, pos_list = self.process_text(line.strip())
+ tokens = [
+ '%s/%s' % (word_list[i], pos_list[i])
+ for i in range(len(word_list))
+ ]
+ self.data_dict.append({
+ 'caption': line.strip(),
+ "tokens": tokens
+ })
+
+ self.w_vectorizer = w_vectorizer
+ print("Total number of descriptions {}".format(len(self.data_dict)))
+
+ def process_text(self, sentence):
+ sentence = sentence.replace('-', '')
+ doc = self.nlp(sentence)
+ word_list = []
+ pos_list = []
+ for token in doc:
+ word = token.text
+ if not word.isalpha():
+ continue
+ if (token.pos_ == 'NOUN'
+ or token.pos_ == 'VERB') and (word != 'left'):
+ word_list.append(token.lemma_)
+ else:
+ word_list.append(word)
+ pos_list.append(token.pos_)
+ return word_list, pos_list
+
+ def inv_transform(self, data):
+ return data * self.std + self.mean
+
+ def __len__(self):
+ return len(self.data_dict)
+
+ def __getitem__(self, item):
+ data = self.data_dict[item]
+ caption, tokens = data['caption'], data['tokens']
+
+ if len(tokens) < self.opt.max_text_len:
+ # pad with "unk"
+ tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
+ sent_len = len(tokens)
+ tokens = tokens + ['unk/OTHER'
+ ] * (self.opt.max_text_len + 2 - sent_len)
+ else:
+ # crop
+ tokens = tokens[:self.opt.max_text_len]
+ tokens = ['sos/OTHER'] + tokens + ['eos/OTHER']
+ sent_len = len(tokens)
+ pos_one_hots = []
+ word_embeddings = []
+ for token in tokens:
+ word_emb, pos_oh = self.w_vectorizer[token]
+ pos_one_hots.append(pos_oh[None, :])
+ word_embeddings.append(word_emb[None, :])
+ pos_one_hots = np.concatenate(pos_one_hots, axis=0)
+ word_embeddings = np.concatenate(word_embeddings, axis=0)
+
+ return word_embeddings, pos_one_hots, caption, sent_len
+
+
+class TextOnlyDataset(data.Dataset):
+ """
+ Args:
+        std_scale_shift: (scale, shift) applied to the std; may be useful for diffusion models to keep the data range manageable
+ """
+ def __init__(self,
+ opt,
+ mean,
+ std,
+ split_file,
+ use_rand_proj=False,
+ proj_matrix_dir=None,
+ traject_only=False,
+ std_scale_shift=(1., 0.),
+ drop_redundant=False):
+ self.mean = mean
+ self.std = std
+ self.opt = opt
+ self.data_dict = []
+ self.max_length = 20
+ self.pointer = 0
+ self.fixed_length = 120
+
+ self.use_rand_proj = use_rand_proj
+ if use_rand_proj:
+ self.init_random_projection(proj_matrix_dir)
+ self.traject_only = traject_only
+ self.std_scale_shift = std_scale_shift
+ self.drop_redundant = drop_redundant
+
+ data_dict = {}
+ id_list = []
+ with cs.open(split_file, 'r') as f:
+ for line in f.readlines():
+ id_list.append(line.strip())
+
+ new_name_list = []
+ length_list = []
+ for name in tqdm(id_list):
+ try:
+ text_data = []
+ flag = False
+ with cs.open(pjoin(opt.text_dir, name + '.txt')) as f:
+ for line in f.readlines():
+ text_dict = {}
+ line_split = line.strip().split('#')
+ caption = line_split[0]
+ tokens = line_split[1].split(' ')
+ f_tag = float(line_split[2])
+ to_tag = float(line_split[3])
+ f_tag = 0.0 if np.isnan(f_tag) else f_tag
+ to_tag = 0.0 if np.isnan(to_tag) else to_tag
+
+ text_dict['caption'] = caption
+ text_dict['tokens'] = tokens
+ if f_tag == 0.0 and to_tag == 0.0:
+ flag = True
+ text_data.append(text_dict)
+ else:
+ try:
+ new_name = random.choice(
+ 'ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
+ while new_name in data_dict:
+ new_name = random.choice(
+ 'ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name
+ data_dict[new_name] = {'text': [text_dict]}
+ new_name_list.append(new_name)
+ except:
+ print(line_split)
+ print(line_split[2], line_split[3], f_tag,
+ to_tag, name)
+
+ if flag:
+ data_dict[name] = {'text': text_data}
+ new_name_list.append(name)
+ except:
+ pass
+
+ self.length_arr = np.array(length_list)
+ self.data_dict = data_dict
+ self.name_list = new_name_list
+
+ def get_std_mean(self, traject_only=None, drop_redundant=None):
+ if traject_only is None:
+ traject_only = self.traject_only
+ if drop_redundant is None:
+ drop_redundant = self.drop_redundant
+
+        # NOTE: this class does not set self.joints_num, so read it from opt instead
+        joints_num = self.opt.joints_num
+
+ if traject_only:
+ std = self.std[:4]
+ mean = self.mean[:4]
+ elif drop_redundant:
+ std = self.std[:(4 + (joints_num - 1) * 3)]
+ mean = self.mean[:(4 + (joints_num - 1) * 3)]
+ else:
+ std = self.std
+ mean = self.mean
+ std = std * self.std_scale_shift[0] + self.std_scale_shift[1]
+ return std, mean
+
+ def inv_transform(self, data, traject_only=None, use_rand_proj=None):
+ use_rand_proj = self.use_rand_proj if use_rand_proj is None else use_rand_proj
+ if use_rand_proj:
+ data = self.inv_random_projection(data)
+ std, mean = self.get_std_mean(traject_only)
+ return data * std + mean
+
+ def inv_transform_th(self, data, traject_only=None, use_rand_proj=None):
+ use_rand_proj = self.use_rand_proj if use_rand_proj is None else use_rand_proj
+ if use_rand_proj:
+ data = self.inv_random_projection(data, mode="th")
+ std, mean = self.get_std_mean(traject_only)
+ return data * torch.from_numpy(std).to(
+ data.device) + torch.from_numpy(mean).to(data.device)
+
+ def transform_th(self, data, traject_only=None, use_rand_proj=None):
+ std, mean = self.get_std_mean(traject_only)
+ data = (data - torch.from_numpy(mean).to(
+ data.device)) / torch.from_numpy(std).to(data.device)
+ use_rand_proj = self.use_rand_proj if use_rand_proj is None else use_rand_proj
+ if use_rand_proj:
+ data = self.random_projection(data, mode="th")
+ return data
+
+ def __len__(self):
+ return len(self.data_dict)
+
+ def __getitem__(self, item):
+ idx = self.pointer + item
+ data = self.data_dict[self.name_list[idx]]
+ text_list = data['text']
+
+ # Randomly select a caption
+ text_data = random.choice(text_list)
+ caption, tokens = text_data['caption'], text_data['tokens']
+ return None, None, caption, None, np.array([0
+ ]), self.fixed_length, None
+ # fixed_length can be set from outside before sampling
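+        # The None placeholders keep the returned tuple aligned with Text2MotionDatasetV2's
+        # (word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, tokens);
+        # in text-only mode only the caption and the fixed length are meaningful.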
+
+ def init_random_projection(self, save_at):
+ if os.path.isfile(os.path.join(save_at, "rand_proj.npy")):
+ self.proj_matrix = np.load(os.path.join(save_at, "rand_proj.npy"))
+ self.inv_proj_matrix = np.load(
+ os.path.join(save_at, "inv_rand_proj.npy"))
+ self.proj_matrix_th = torch.from_numpy(self.proj_matrix)
+ self.inv_proj_matrix_th = torch.from_numpy(self.inv_proj_matrix)
+ else:
+ print("... No projection matrix ...")
+ assert False
+
+ def random_projection(self, motion, mode="np"):
+ if mode == "th":
+ return torch.matmul(motion, self.proj_matrix_th.to(motion.device))
+ return np.matmul(motion, self.proj_matrix)
+
+ def inv_random_projection(self, data, mode="np"):
+ if mode == "th":
+ return torch.matmul(data, self.inv_proj_matrix_th.to(data.device))
+ return np.matmul(data, self.inv_proj_matrix)
+
+class CustomRig(data.Dataset):
+ def __init__(self,
+ mode,
+ datapath="./dataset/humanml_opt.txt", ## FIXME: figure out what settings are needed
+ split="train", ## FIXME: why is it train here, for t2m right
+ use_abs3d=False, ## FIXME: do we need to set this if we are only using abs3d anyway
+ traject_only=False,
+ use_random_projection=False,
+ random_projection_scale=None,
+ augment_type="none",
+ std_scale_shift=(1., 0.),
+ drop_redundant=False,
+ num_frames=None,
+ **kwargs):
+ self.mode = mode
+
+ self.dataset_name = "t2m"
+ self.dataname = "t2m"
+
+        # The configurations of the T2M and KIT datasets are almost the same
+ abs_base_path = '.'
+ dataset_opt_path = pjoin(abs_base_path, datapath)
+ device = None # torch.device('cuda:4') # This param is not in use in this context
+ # TODO: modernize get_opt
+ opt = get_opt(dataset_opt_path, device, mode, use_abs3d=use_abs3d, max_motion_length=num_frames)
+ opt.motion_dir = pjoin(abs_base_path, opt.motion_dir)
+ opt.text_dir = pjoin(abs_base_path, opt.text_dir)
+ opt.model_dir = pjoin(abs_base_path, opt.model_dir)
+ opt.checkpoints_dir = pjoin(abs_base_path, opt.checkpoints_dir)
+ opt.data_root = pjoin(abs_base_path, opt.data_root)
+ opt.save_root = pjoin(abs_base_path, opt.save_root)
+ opt.meta_dir = './dataset'
+ self.opt = opt
+ print('Loading dataset %s ...' % opt.dataset_name)
+
+ self.absolute_3d = use_abs3d
+ self.traject_only = traject_only
+ self.use_rand_proj = use_random_projection
+ self.random_proj_scale = random_projection_scale
+ self.augment_type = augment_type
+ self.std_scale_shift = std_scale_shift
+ self.drop_redundant = drop_redundant
+
+ if self.use_rand_proj:
+ if self.random_proj_scale == 10:
+ # NOTE: legacy code
+ proj_matrix_dir = "./dataset"
+ else:
+ proj_matrix_dir = os.path.join(
+ f'save/random_proj_{self.random_proj_scale:.0f}')
+ os.makedirs(proj_matrix_dir, exist_ok=True)
+ print(f'proj_matrix_dir = {proj_matrix_dir}')
+ else:
+ proj_matrix_dir = None
+
+ ###
+ print(f">>> (INFO) >>> mode = {mode}")
+
+ if self.absolute_3d:
+ # If mode is 'gt' or 'eval', we will load the *original* dataset. Not the absolute rot, x, z.
+ if mode == 'gt':
+ # used by T2M models (including evaluators)
+ self.mean = np.load(
+ pjoin(opt.meta_dir, f'{opt.dataset_name}_mean.npy'))
+ self.std = np.load(
+ pjoin(opt.meta_dir, f'{opt.dataset_name}_std.npy'))
+ # elif mode == :
+ # # used by MDM models
+ # self.mean = np.load(pjoin(opt.data_root, 'Mean.npy'))
+ # self.std = np.load(pjoin(opt.data_root, 'Std.npy'))
+ elif mode in ['train', 'eval', 'text_only']:
+ '''
+                The 'eval' mode is included here because we want inv_transform to work the same way at inference for models with abs3d,
+                regardless of which dataset is loaded.
+ '''
+ # used by absolute model
+ self.mean = np.load(pjoin(opt.data_root, 'Mean_abs_3d.npy'))
+ self.std = np.load(pjoin(opt.data_root, 'Std_abs_3d.npy'))
+
+ self.mean_gt = np.load(
+ pjoin(opt.meta_dir, f'{opt.dataset_name}_mean.npy'))
+ self.std_gt = np.load(
+ pjoin(opt.meta_dir, f'{opt.dataset_name}_std.npy'))
+ self.mean_rel = np.load(pjoin(opt.data_root, 'Mean.npy'))
+ self.std_rel = np.load(pjoin(opt.data_root, 'Std.npy'))
+ self.mean_abs = np.load(pjoin(opt.data_root, 'Mean_abs_3d.npy'))
+ self.std_abs = np.load(pjoin(opt.data_root, 'Std_abs_3d.npy'))
+ elif mode == 'gt':
+ # used by T2M models (including evaluators)
+ self.mean = np.load(
+ pjoin(opt.meta_dir, f'{opt.dataset_name}_mean.npy'))
+ self.std = np.load(
+ pjoin(opt.meta_dir, f'{opt.dataset_name}_std.npy'))
+ elif mode in ['train', 'eval', 'text_only']:
+ # used by our models
+ self.mean = np.load(pjoin(opt.data_root, 'Mean.npy'))
+ self.std = np.load(pjoin(opt.data_root, 'Std.npy'))
+
+ if mode == 'eval':
+ # used by T2M models (including evaluators)
+ # this is to translate their norms to ours
+ self.mean_for_eval = np.load(
+ pjoin(opt.meta_dir, f'{opt.dataset_name}_mean.npy'))
+ self.std_for_eval = np.load(
+ pjoin(opt.meta_dir, f'{opt.dataset_name}_std.npy'))
+
+ self.split_file = pjoin(opt.data_root, f'{split}.txt')
+
+ if mode == 'text_only':
+            assert self.random_proj_scale == 10, 'text_only mode only supports a random projection scale of 10'
+ print(
+ f't2m dataset aug: {self.augment_type} std_scale_shift: {self.std_scale_shift}'
+ )
+ print(f't2m dataset drop redundant information: {self.drop_redundant}')
+ self.t2m_dataset = TextOnlyDataset(
+ self.opt,
+ self.mean,
+ self.std,
+ self.split_file,
+ use_rand_proj=self.use_rand_proj,
+ proj_matrix_dir=proj_matrix_dir,
+ traject_only=self.traject_only,
+ std_scale_shift=self.std_scale_shift,
+ drop_redundant=self.drop_redundant,)
+ else:
+ self.w_vectorizer = WordVectorizer(pjoin(abs_base_path, 'glove'),
+ 'our_vab')
+ print(
+ f't2m dataset aug: {self.augment_type} std_scale_shift: {self.std_scale_shift}'
+ )
+ print(f't2m dataset drop redundant information: {self.drop_redundant}')
+ self.t2m_dataset = Text2MotionDatasetV2(
+ self.opt,
+ self.mean,
+ self.std,
+ self.split_file,
+ self.w_vectorizer,
+ use_rand_proj=self.use_rand_proj,
+ proj_matrix_dir=proj_matrix_dir,
+ traject_only=self.traject_only,
+ mode=mode,
+ random_proj_scale=self.random_proj_scale,
+ augment_type=self.augment_type,
+ std_scale_shift=self.std_scale_shift,
+ drop_redundant=self.drop_redundant,)
+ # End test
+ self.num_actions = 1 # dummy placeholder
+
+ assert len(self.t2m_dataset) > 1, 'You loaded an empty dataset, ' \
+ 'it is probably because your data dir has only texts and no motions.\n' \
+ 'To train and evaluate MDM you should get the FULL data as described ' \
+ 'in the README file.'
+
+        # Load the necessary variables for converting raw motion to processed data
+ data_dir = './dataset/000021.npy' ## FIXME: need to know how to reverse-engineer this, currently just using old rig data
+ self.n_raw_offsets = torch.from_numpy(custom_raw_offsets)
+ self.kinematic_chain = custom_kinematic_chain
+ # Get offsets of target skeleton
+ example_data = np.load(data_dir)
+ example_data = example_data.reshape(len(example_data), -1, 3)
+ example_data = torch.from_numpy(example_data)
+ tgt_skel = Skeleton(self.n_raw_offsets, self.kinematic_chain, 'cpu')
+ # (joints_num, 3)
+ tgt_offsets = tgt_skel.get_offsets_joints(example_data[0])
+
+ def __getitem__(self, item):
+ return self.t2m_dataset.__getitem__(item)
+
+ def __len__(self):
+ return self.t2m_dataset.__len__()
+
+ def motion_to_rel_data(self, motion, model):
+ motion_bu = motion.detach().clone()
+ # Right/Left foot
+ fid_r, fid_l = [9, 10], [4, 5]
+ # Face direction, r_hip, l_hip, sdr_r, sdr_l
+ face_joint_indx = [6, 1, 23, 18]
+ sample_rel_np_list = []
+ for ii in range(len(motion)):
+            # Data needs to be [120 (timesteps), 22, 3] to extract features
+ sample_rel = extract_features(
+ motion[ii].detach().cpu().clone().permute(2, 0,
+ 1).cpu().numpy(),
+ 0.002, self.n_raw_offsets, self.kinematic_chain,
+ face_joint_indx, fid_r, fid_l)
+ # Duplicate last motion step to match the size
+ sample_rel = torch.from_numpy(sample_rel).unsqueeze(0).float()
+ sample_rel = torch.cat(
+ [sample_rel, sample_rel[0:1, -1:, :].clone()], dim=1)
+ # Normalize with relative normalization
+ sample_rel = (sample_rel - self.mean_rel) / self.std_rel
+ sample_rel = sample_rel.unsqueeze(1).permute(0, 3, 1, 2)
+ sample_rel = sample_rel.to(motion.device)
+ sample_rel_np_list.append(sample_rel)
+
+ processed_data = torch.cat(sample_rel_np_list, axis=0)
+
+ return processed_data
+
+
+ def motion_to_abs_data(self, motion, model):
+ """
+        Follows how the abs3d dataset is initially created:
+        first create the relative data, then compute the absolute root rot/pos from it and substitute it into the relative data.
+ """
+ motion_bu = motion.detach().clone() # [bs, 22, 3, 196]
+ # Right/Left foot
+ fid_r, fid_l = [9, 10], [4, 5]
+ # Face direction, r_hip, l_hip, sdr_r, sdr_l
+ face_joint_indx = [6, 1, 23, 18]
+ sample_abs_np_list = []
+ for ii in range(len(motion)):
+            # Data needs to be [120 (timesteps), 22, 3] to extract features
+ sample_rel = extract_features(
+ motion[ii].detach().cpu().clone().permute(2, 0,
+ 1).cpu().numpy(),
+ 0.002, self.n_raw_offsets, self.kinematic_chain,
+ face_joint_indx, fid_r, fid_l)
+ # Duplicate last motion step to match the size
+ sample_rel = torch.from_numpy(sample_rel).unsqueeze(0).float()
+ sample_rel = torch.cat([sample_rel, sample_rel[0:1, -1:, :].clone()], dim=1) # [1, 196, 263]
+ # Compute absolute root information instead of relative
+ from data_loaders.custom.scripts.motion_process import recover_root_rot_pos
+ r_rot_quat, r_pos, rot_ang = recover_root_rot_pos(sample_rel[None], abs_3d=False, return_rot_ang=True)
+ sample_abs = sample_rel[None].clone()
+ sample_abs[..., 0] = rot_ang
+ sample_abs[..., [1, 2]] = r_pos[..., [0, 2]]
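+            # Channel 0 now holds the absolute root yaw angle and channels 1-2 the absolute
+            # root (x, z) position; the remaining feature channels keep their relative encoding.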
+ # Normalize with absolute normalization
+ sample_abs = (sample_abs - self.mean_abs) / self.std_abs # TODO: Check if correct stats are used
+ sample_abs = sample_abs.permute(0, 3, 1, 2)
+ sample_abs = sample_abs.to(motion.device)
+ sample_abs_np_list.append(sample_abs)
+
+ processed_data = torch.cat(sample_abs_np_list, axis=0)
+
+ return processed_data
+
+
+def sample_to_motion(sample_abs, dataset, model):
+ n_joints = 27
+ # (bs, 12 * n_joints - 1, 1, 120)
+ # In case of random projection, this already includes undoing the random projection
+ sample = dataset.t2m_dataset.inv_transform(sample_abs.cpu().permute(
+ 0, 2, 3, 1)).float()
+
+ sample = recover_from_ric(sample, n_joints, abs_3d=True)
+ sample = sample.view(-1, *sample.shape[2:]).permute(0, 2, 3, 1)
+
+ rot2xyz_pose_rep = 'xyz'
+ rot2xyz_mask = None
+ sample = model.rot2xyz(x=sample,
+ mask=rot2xyz_mask,
+ pose_rep=rot2xyz_pose_rep,
+ glob=True,
+ translation=True,
+ jointstype='smpl',
+ vertstrans=True,
+ betas=None,
+ beta=0,
+ glob_rot=None,
+ get_rotations_back=False)
+ return sample
+
+
+def abs3d_to_rel(sample_abs, dataset, model):
+ '''We want to change the first 3 values from absolute to relative
+ sample_abs shape [bs, 263, 1, 196]
+ '''
+ n_joints = 27
+ # (bs, 263, 1, 120)
+ # In case of random projection, this already includes undoing the random projection
+ sample = dataset.t2m_dataset.inv_transform(sample_abs.cpu().permute(
+ 0, 2, 3, 1)).float()
+
+ sample = recover_from_ric(sample, n_joints, abs_3d=True)
+ sample = sample.view(-1, *sample.shape[2:]).permute(0, 2, 3, 1)
+
+ rot2xyz_pose_rep = 'xyz'
+ rot2xyz_mask = None
+ sample = model.rot2xyz(x=sample,
+ mask=rot2xyz_mask,
+ pose_rep=rot2xyz_pose_rep,
+ glob=True,
+ translation=True,
+ jointstype='smpl',
+ vertstrans=True,
+ betas=None,
+ beta=0,
+ glob_rot=None,
+ get_rotations_back=False)
+
+ # sample now shape [32, 22, 3, 196].
+ # from data_loaders.custom.utils.plot_script import plot_3d_motion
+ # plot_3d_motion("./test_positions_1.mp4", dataset.kinematic_chain, sample[4].permute(2,0,1).detach().cpu().numpy(), 'title', 'humanml', fps=20)
+
+ # Now convert skeleton back to sample with relative representation
+ sample_rel = dataset.motion_to_rel_data(sample, model)
+
+ return sample_rel
+
+
+def rel_to_abs3d(sample_rel, dataset, model):
+ """We want to change the first 3 values from relative to absolute
+
+ Args:
+ sample_rel (torch.tensor): shape [bs, 263, 1, 196]
+
+ Returns:
+ sample_abs (torch.tensor): shape [bs, 263, 1, 196]
+ """
+ n_joints = 27
+
+ sample = dataset.t2m_dataset.inv_transform(sample_rel.cpu().permute(0, 2, 3, 1)).float()
+
+ sample = recover_from_ric(sample, n_joints, abs_3d=False)
+ sample = sample.view(-1, *sample.shape[2:]).permute(0, 2, 3, 1) # [bs, 22, 3, 196]
+
+ rot2xyz_pose_rep = 'xyz'
+ rot2xyz_mask = None
+ sample = model.rot2xyz(x=sample,
+ mask=rot2xyz_mask,
+ pose_rep=rot2xyz_pose_rep,
+ glob=True,
+ translation=True,
+ jointstype='smpl',
+ vertstrans=True,
+ betas=None,
+ beta=0,
+ glob_rot=None,
+ get_rotations_back=False)
+
+ # sample now shape [32, 22, 3, 196].
+ # from data_loaders.custom.utils.plot_script import plot_3d_motion
+ # plot_3d_motion("./test_positions_1.mp4", dataset.kinematic_chain, sample[4].permute(2,0,1).detach().cpu().numpy(), 'title', 'humanml', fps=20)
+
+ # Now convert skeleton back to sample with absolute representation
+ sample_abs = dataset.motion_to_abs_data(sample, model)
+
+ return sample_abs
diff --git a/data_loaders/custom/motion_loaders/__init__.py b/data_loaders/custom/motion_loaders/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/data_loaders/custom/motion_loaders/comp_v6_model_dataset.py b/data_loaders/custom/motion_loaders/comp_v6_model_dataset.py
new file mode 100644
index 0000000..d175c23
--- /dev/null
+++ b/data_loaders/custom/motion_loaders/comp_v6_model_dataset.py
@@ -0,0 +1,1040 @@
+import torch
+from utils.fixseed import fixseed
+from networks.modules import *
+from networks.trainers import CompTrainerV6
+from torch.utils.data import Dataset, DataLoader
+from os.path import join as pjoin
+from tqdm import tqdm
+from utils import dist_util
+import os
+import copy
+from functools import partial
+
+from data_loaders.custom.data.dataset import abs3d_to_rel, sample_to_motion
+from scripts.motion_process import recover_from_ric
+from utils.metrics import calculate_skating_ratio
+from sample.gmd.condition import (
+    cond_fn_key_location, get_target_from_kframes, get_target_and_inpt_from_kframes_batch,
+    log_trajectory_from_xstart, get_inpainting_motion_from_traj, get_inpainting_motion_from_gt,
+    compute_kps_error, cond_fn_sdf, CondKeyLocations, CondKeyLocationsWithSdf)
+
+
+def build_models(opt):
+ if opt.text_enc_mod == 'bigru':
+ text_encoder = TextEncoderBiGRU(word_size=opt.dim_word,
+ pos_size=opt.dim_pos_ohot,
+ hidden_size=opt.dim_text_hidden,
+ device=opt.device)
+ text_size = opt.dim_text_hidden * 2
+ else:
+ raise Exception("Text Encoder Mode not Recognized!!!")
+
+ seq_prior = TextDecoder(text_size=text_size,
+ input_size=opt.dim_att_vec + opt.dim_movement_latent,
+ output_size=opt.dim_z,
+ hidden_size=opt.dim_pri_hidden,
+ n_layers=opt.n_layers_pri)
+
+
+ seq_decoder = TextVAEDecoder(text_size=text_size,
+ input_size=opt.dim_att_vec + opt.dim_z + opt.dim_movement_latent,
+ output_size=opt.dim_movement_latent,
+ hidden_size=opt.dim_dec_hidden,
+ n_layers=opt.n_layers_dec)
+
+ att_layer = AttLayer(query_dim=opt.dim_pos_hidden,
+ key_dim=text_size,
+ value_dim=opt.dim_att_vec)
+
+ movement_enc = MovementConvEncoder(opt.dim_pose - 4, opt.dim_movement_enc_hidden, opt.dim_movement_latent)
+ movement_dec = MovementConvDecoder(opt.dim_movement_latent, opt.dim_movement_dec_hidden, opt.dim_pose)
+
+ len_estimator = MotionLenEstimatorBiGRU(opt.dim_word, opt.dim_pos_ohot, 512, opt.num_classes)
+
+ # latent_dis = LatentDis(input_size=opt.dim_z * 2)
+ checkpoints = torch.load(pjoin(opt.checkpoints_dir, opt.dataset_name, 'length_est_bigru', 'model', 'latest.tar'), map_location=opt.device)
+ len_estimator.load_state_dict(checkpoints['estimator'])
+ len_estimator.to(opt.device)
+ len_estimator.eval()
+
+ # return text_encoder, text_decoder, att_layer, vae_pri, vae_dec, vae_pos, motion_dis, movement_dis, latent_dis
+ return text_encoder, seq_prior, seq_decoder, att_layer, movement_enc, movement_dec, len_estimator
+
+class CompV6GeneratedDataset(Dataset):
+
+ def __init__(self, opt, dataset, w_vectorizer, mm_num_samples, mm_num_repeats):
+ assert mm_num_samples < len(dataset)
+ print(opt.model_dir)
+
+ dataloader = DataLoader(dataset, batch_size=1, num_workers=1, shuffle=True)
+ text_enc, seq_pri, seq_dec, att_layer, mov_enc, mov_dec, len_estimator = build_models(opt)
+ trainer = CompTrainerV6(opt, text_enc, seq_pri, seq_dec, att_layer, mov_dec, mov_enc=mov_enc)
+ epoch, it, sub_ep, schedule_len = trainer.load(pjoin(opt.model_dir, opt.which_epoch + '.tar'))
+ generated_motion = []
+ mm_generated_motions = []
+ mm_idxs = np.random.choice(len(dataset), mm_num_samples, replace=False)
+ mm_idxs = np.sort(mm_idxs)
+ min_mov_length = 10 if opt.dataset_name == 't2m' else 6
+ # print(mm_idxs)
+
+ print('Loading model: Epoch %03d Schedule_len %03d' % (epoch, schedule_len))
+ trainer.eval_mode()
+ trainer.to(opt.device)
+ with torch.no_grad():
+ for i, data in tqdm(enumerate(dataloader)):
+ word_emb, pos_ohot, caption, cap_lens, motions, m_lens, tokens = data
+ tokens = tokens[0].split('_')
+ word_emb = word_emb.detach().to(opt.device).float()
+ pos_ohot = pos_ohot.detach().to(opt.device).float()
+
+ pred_dis = len_estimator(word_emb, pos_ohot, cap_lens)
+ pred_dis = nn.Softmax(-1)(pred_dis).squeeze()
+
+ mm_num_now = len(mm_generated_motions)
+ is_mm = True if ((mm_num_now < mm_num_samples) and (i == mm_idxs[mm_num_now])) else False
+
+ repeat_times = mm_num_repeats if is_mm else 1
+ mm_motions = []
+ for t in range(repeat_times):
+ mov_length = torch.multinomial(pred_dis, 1, replacement=True)
+ if mov_length < min_mov_length:
+ mov_length = torch.multinomial(pred_dis, 1, replacement=True)
+ if mov_length < min_mov_length:
+ mov_length = torch.multinomial(pred_dis, 1, replacement=True)
+
+ m_lens = mov_length * opt.unit_length
+ pred_motions, _, _ = trainer.generate(word_emb, pos_ohot, cap_lens, m_lens,
+ m_lens[0]//opt.unit_length, opt.dim_pose)
+ if t == 0:
+ # print(m_lens)
+ # print(text_data)
+ sub_dict = {'motion': pred_motions[0].cpu().numpy(),
+ 'length': m_lens[0].item(),
+ 'cap_len': cap_lens[0].item(),
+ 'caption': caption[0],
+ 'tokens': tokens}
+ generated_motion.append(sub_dict)
+
+ if is_mm:
+ mm_motions.append({
+ 'motion': pred_motions[0].cpu().numpy(),
+ 'length': m_lens[0].item()
+ })
+ if is_mm:
+ mm_generated_motions.append({'caption': caption[0],
+ 'tokens': tokens,
+ 'cap_len': cap_lens[0].item(),
+ 'mm_motions': mm_motions})
+
+ self.generated_motion = generated_motion
+ self.mm_generated_motion = mm_generated_motions
+ self.opt = opt
+ self.w_vectorizer = w_vectorizer
+
+
+ def __len__(self):
+ return len(self.generated_motion)
+
+
+ def __getitem__(self, item):
+ data = self.generated_motion[item]
+ motion, m_length, caption, tokens = data['motion'], data['length'], data['caption'], data['tokens']
+ sent_len = data['cap_len']
+
+ pos_one_hots = []
+ word_embeddings = []
+ for token in tokens:
+ word_emb, pos_oh = self.w_vectorizer[token]
+ pos_one_hots.append(pos_oh[None, :])
+ word_embeddings.append(word_emb[None, :])
+ pos_one_hots = np.concatenate(pos_one_hots, axis=0)
+ word_embeddings = np.concatenate(word_embeddings, axis=0)
+
+ if m_length < self.opt.max_motion_length:
+ motion = np.concatenate([motion,
+ np.zeros((self.opt.max_motion_length - m_length, motion.shape[1]))
+ ], axis=0)
+ return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, '_'.join(tokens)
+
+class CompMDMGeneratedDataset(Dataset):
+
+ def __init__(self, model, diffusion, dataloader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale=1., save_dir=None, seed=None):
+ assert seed is not None, "seed must be provided"
+ self.dataloader = dataloader
+ self.dataset = dataloader.dataset
+ self.save_dir = save_dir
+ assert save_dir is not None
+ assert mm_num_samples < len(dataloader.dataset)
+
+ # create the target directory
+ os.makedirs(self.save_dir, exist_ok=True)
+
+ use_ddim = False # FIXME - hardcoded
+ # NOTE: I have updated the code in gaussian_diffusion.py so that it won't clip denoise for xstart models.
+ # hence, always set the clip_denoised to True
+ clip_denoised = True
+ self.max_motion_length = max_motion_length
+ sample_fn = (
+ diffusion.p_sample_loop if not use_ddim else diffusion.ddim_sample_loop
+ )
+
+ real_num_batches = len(dataloader)
+ if num_samples_limit is not None:
+ real_num_batches = num_samples_limit // dataloader.batch_size + 1
+ print('real_num_batches', real_num_batches)
+
+ generated_motion = []
+ # NOTE: mm = multi-modal
+ mm_generated_motions = []
+ if mm_num_samples > 0:
+ mm_idxs = np.random.choice(real_num_batches, mm_num_samples // dataloader.batch_size +1, replace=False)
+ mm_idxs = np.sort(mm_idxs)
+ else:
+ mm_idxs = []
+ print('mm_idxs', mm_idxs)
+
+ model.eval()
+
+
+ with torch.no_grad():
+ for i, (motion, model_kwargs) in tqdm(enumerate(dataloader)):
+
+ if num_samples_limit is not None and len(generated_motion) >= num_samples_limit:
+ break
+
+ tokens = [t.split('_') for t in model_kwargs['y']['tokens']]
+
+ # add CFG scale to batch
+ if scale != 1.:
+ model_kwargs['y']['scale'] = torch.ones(motion.shape[0],
+ device=dist_util.dev()) * scale
+
+ mm_num_now = len(mm_generated_motions) // dataloader.batch_size
+ is_mm = i in mm_idxs
+ repeat_times = mm_num_repeats if is_mm else 1
+ mm_motions = []
+ for t in range(repeat_times):
+                    # setting the seed here makes sure that the same seed is used even when continuing from unfinished runs
+ seed_number = seed * 100_000 + i * 100 + t
+ fixseed(seed_number)
+
+ batch_file = f'{i:04d}_{t:02d}.pt'
+ batch_path = os.path.join(self.save_dir, batch_file)
+
+ # reusing the batch if it exists
+ if os.path.exists(batch_path):
+ # [bs, njoints, nfeat, seqlen]
+ sample = torch.load(batch_path, map_location=motion.device)
+ print(f'batch {batch_file} exists, loading from file')
+ else:
+ # [bs, njoints, nfeat, seqlen]
+ sample = sample_fn(
+ model,
+ motion.shape,
+ clip_denoised=clip_denoised,
+ model_kwargs=model_kwargs,
+ skip_timesteps=0, # 0 is the default value - i.e. don't skip any step
+ init_image=None,
+ progress=True,
+ dump_steps=None,
+ noise=None,
+ const_noise=False,
+                            # when experimenting with guidance_scale we want to neutralize the effect of noise on generation
+ )
+ # save to file
+ torch.save(sample, batch_path)
+
+ # print('cut the motion length from {} to {}'.format(sample.shape[-1], self.max_motion_length))
+ sample = sample[:, :, :, :self.max_motion_length]
+ # Compute error for key xz locations
+ cur_motion = sample_to_motion(sample, self.dataset, model)
+ # We can get the trajectory from here. Get only root xz from motion
+ cur_traj = cur_motion[:, 0, [0, 2], :]
+
+ # NOTE: To test if the motion is reasonable or not
+ log_motion = False
+ if log_motion:
+ from utils.plot_script import plot_3d_motion
+ for j in tqdm([1, 3, 4, 5], desc="generating motion"):
+ motion_id = f'{i:04d}_{t:02d}_{j:02d}'
+ plot_3d_motion(os.path.join(self.save_dir, f"motion_cond_{motion_id}.mp4"), self.dataset.kinematic_chain,
+ cur_motion[j].permute(2,0,1).detach().cpu().numpy(), 'title', 'humanml', fps=20)
+
+ if self.dataset.absolute_3d:
+ # NOTE: Changing the output from absolute space to the relative space here.
+ # The easiest way to do this is to go all the way to skeleton and convert back again.
+ # sample shape [32, 263, 1, 196]
+ sample = abs3d_to_rel(sample, self.dataset, model)
+
+ if t == 0:
+ sub_dicts = [{'motion': sample[bs_i].squeeze().permute(1,0).cpu().numpy(),
+ 'length': model_kwargs['y']['lengths'][bs_i].cpu().numpy(),
+ 'caption': model_kwargs['y']['text'][bs_i],
+ 'tokens': tokens[bs_i],
+ 'cap_len': len(tokens[bs_i]),
+ } for bs_i in range(dataloader.batch_size)]
+ generated_motion += sub_dicts
+
+ if is_mm:
+ mm_motions += [{'motion': sample[bs_i].squeeze().permute(1, 0).cpu().numpy(),
+ 'length': model_kwargs['y']['lengths'][bs_i].cpu().numpy(),
+ 'traj': cur_traj[bs_i].squeeze().permute(1, 0).cpu().numpy(),
+ } for bs_i in range(dataloader.batch_size)]
+
+ if is_mm:
+ mm_generated_motions += [{
+ 'caption': model_kwargs['y']['text'][bs_i],
+ 'tokens': tokens[bs_i],
+ 'cap_len': len(tokens[bs_i]),
+ 'mm_motions': mm_motions[bs_i::dataloader.batch_size], # collect all 10 repeats from the (32*10) generated motions
+ } for bs_i in range(dataloader.batch_size)]
+
+
+ self.generated_motion = generated_motion
+ self.mm_generated_motion = mm_generated_motions
+ self.w_vectorizer = dataloader.dataset.w_vectorizer
+
+
+ def __len__(self):
+ return len(self.generated_motion)
+
+
+ def __getitem__(self, item):
+ data = self.generated_motion[item]
+ motion, m_length, caption, tokens = data['motion'], data['length'], data['caption'], data['tokens']
+ sent_len = data['cap_len']
+ if 'skate_ratio' in data.keys():
+ skate_ratio = data['skate_ratio']
+ else:
+ skate_ratio = -1
+
+ # print("get item")
+ # print("abs ", self.dataset.absolute_3d)
+ # print(self.dataset.mode)
+ # if self.dataset.absolute_3d:
+ # # If we use the dataset with absolute 3D location, we need to convert the motion to relative first
+ # normed_motion = motion
+ # denormed_motion = self.dataset.t2m_dataset.inv_transform(normed_motion)
+ # # Convert the denormed_motion from absolute 3D position to relative
+ # # denormed_motion_relative = self.dataset.t2m_dataset.abs3d_to_rel(denormed_motion)
+ # denormed_motion_relative = abs3d_to_rel(denormed_motion)
+
+ # if self.dataset.mode == 'eval':
+ # # Normalize again with the *T2M* mean and std
+ # renormed_motion = (denormed_motion_relative - self.dataset.mean_for_eval) / self.dataset.std_for_eval # according to T2M norms
+ # motion = renormed_motion
+ # else:
+ # # Normalize again with the *relative* mean and std.
+ # # Expect mode 'gt'
+ # # This assume that we will want to use this function to only get gt or for eval
+ # raise NotImplementedError
+ # renormed_motion_relative = (denormed_motion_relative - self.dataset.mean_rel) / self.dataset.std_rel
+ # motion = renormed_motion_relative
+
+ if self.dataset.mode == 'eval':
+ normed_motion = motion
+ if self.dataset.absolute_3d:
+ # Denorm with rel_transform because the inv_transform() will have the absolute mean and std
+ # The motion is already converted to relative after inference
+ denormed_motion = (normed_motion * self.dataset.std_rel) + self.dataset.mean_rel
+ else:
+ denormed_motion = self.dataset.t2m_dataset.inv_transform(normed_motion)
+ renormed_motion = (denormed_motion - self.dataset.mean_for_eval) / self.dataset.std_for_eval # according to T2M norms
+ motion = renormed_motion
+ # This step is needed because T2M evaluators expect their norm convention
+
+ pos_one_hots = []
+ word_embeddings = []
+ for token in tokens:
+ word_emb, pos_oh = self.w_vectorizer[token]
+ pos_one_hots.append(pos_oh[None, :])
+ word_embeddings.append(word_emb[None, :])
+ pos_one_hots = np.concatenate(pos_one_hots, axis=0)
+ word_embeddings = np.concatenate(word_embeddings, axis=0)
+
+ return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, '_'.join(tokens), skate_ratio
+
+
+# Data class for generated motion by *conditioning*
+class CompMDMGeneratedDatasetCondition(Dataset):
+
+ def __init__(self, model_dict, diffusion_dict, dataloader, mm_num_samples, mm_num_repeats,
+ max_motion_length, num_samples_limit, scale=1., save_dir=None, impute_until=0, skip_first_stage=False,
+ seed=None, use_ddim=False):
+
+ assert seed is not None, "must provide seed"
+
+ self.dataloader = dataloader
+ self.dataset = dataloader.dataset
+ self.save_dir = save_dir
+        # This affects the trajectory model in the two-stage setup; otherwise it affects the motion model
+        # For the trajectory model, the output traj will be imputed until 20 (set by impute_slack)
+ self.impute_until = impute_until
+
+ motion_model, traj_model = model_dict["motion"], model_dict["traj"]
+ motion_diffusion, traj_diffusion = diffusion_dict["motion"], diffusion_dict["traj"]
+
+ ### Basic settings
+ motion_classifier_scale = 100.0
+ print("motion classifier scale", motion_classifier_scale)
+ log_motion = False
+ guidance_mode = 'no'
+ abs_3d = True
+ use_random_proj = self.dataset.use_rand_proj
+ print("guidance mode", guidance_mode)
+ print("use ddim", use_ddim)
+
+ model_device = next(motion_model.parameters()).device
+ motion_diffusion.data_get_mean_fn = self.dataset.t2m_dataset.get_std_mean
+ motion_diffusion.data_transform_fn = self.dataset.t2m_dataset.transform_th
+ motion_diffusion.data_inv_transform_fn = self.dataset.t2m_dataset.inv_transform_th
+ if log_motion:
+ motion_diffusion.log_trajectory_fn = partial(
+ log_trajectory_from_xstart,
+ kframes=[],
+ inv_transform=self.dataset.t2m_dataset.inv_transform_th,
+ abs_3d=abs_3d, # <--- assume the motion model is absolute
+ use_rand_proj=self.dataset.use_rand_proj,
+ traject_only=False,
+ n_frames=max_motion_length)
+
+ if traj_diffusion is not None:
+ trajectory_classifier_scale = 100.0 # 100.0
+ print("trajectory classifier scale", trajectory_classifier_scale)
+ traj_diffusion.data_transform_fn = None
+ traj_diffusion.data_inv_transform_fn = None
+ if log_motion:
+ traj_diffusion.log_trajectory_fn = partial(
+ log_trajectory_from_xstart,
+ kframes=[],
+ inv_transform=self.dataset.t2m_dataset.inv_transform_th,
+ abs_3d=abs_3d, # <--- assume the traj model is absolute
+ traject_only=True,
+ n_frames=max_motion_length)
+ sample_fn_traj = (
+ traj_diffusion.p_sample_loop if not use_ddim else traj_diffusion.ddim_sample_loop
+ )
+ traj_model.eval()
+ else:
+ # If we don't have a trajectory diffusion model, assume that we are using classifier-free 1-stage model
+ pass
+
+ assert save_dir is not None
+ assert mm_num_samples < len(dataloader.dataset)
+
+ # create the target directory
+ os.makedirs(self.save_dir, exist_ok=True)
+
+ # use_ddim = False # FIXME - hardcoded
+ # NOTE: I have updated the code in gaussian_diffusion.py so that it won't clip denoise for xstart models.
+ # hence, always set the clip_denoised to True
+ clip_denoised = True
+ self.max_motion_length = max_motion_length
+
+ sample_fn_motion = (
+ motion_diffusion.p_sample_loop if not use_ddim else motion_diffusion.ddim_sample_loop
+ )
+
+ real_num_batches = len(dataloader)
+ if num_samples_limit is not None:
+ real_num_batches = num_samples_limit // dataloader.batch_size + 1
+ print('real_num_batches', real_num_batches)
+
+ generated_motion = []
+ # NOTE: mm = multi-modal
+ mm_generated_motions = []
+ if mm_num_samples > 0:
+ mm_idxs = np.random.choice(real_num_batches, mm_num_samples // dataloader.batch_size +1, replace=False)
+ mm_idxs = np.sort(mm_idxs)
+ else:
+ mm_idxs = []
+ print('mm_idxs', mm_idxs)
+
+ motion_model.eval()
+
+ with torch.no_grad():
+ for i, (motion, model_kwargs) in tqdm(enumerate(dataloader)):
+ '''For each datapoint, we do the following
+ 1. Sample 3-10 (?) points from the ground truth trajectory to be used as conditions
+ 2. Generate trajectory with trajectory model
+ 3. Generate motion based on the generated traj using inpainting and cond_fn.
+ '''
+
+ if num_samples_limit is not None and len(generated_motion) >= num_samples_limit:
+ break
+
+ tokens = [t.split('_') for t in model_kwargs['y']['tokens']]
+ # add CFG scale to batch
+ if scale != 1.:
+ model_kwargs['y']['scale'] = torch.ones(motion.shape[0],
+ device=dist_util.dev()) * scale
+
+ ### 1. Prepare motion for conditioning ###
+ traj_model_kwargs = copy.deepcopy(model_kwargs)
+ traj_model_kwargs['y']['traj_model'] = True
+ model_kwargs['y']['traj_model'] = False
+
+ # Convert to 3D motion space
+                # NOTE: the 'motion' will not be randomly projected if the dataset mode is 'eval' or 'gt',
+                # even if 'self.dataset.t2m_dataset.use_rand_proj' is True
+ gt_poses = motion.permute(0, 2, 3, 1)
+ gt_poses = gt_poses * self.dataset.std + self.dataset.mean # [bs, 1, 196, 263]
+ # (x,y,z) [bs, 1, 120, njoints=22, nfeat=3]
+ gt_skel_motions = recover_from_ric(gt_poses.float(), 22, abs_3d=False)
+ gt_skel_motions = gt_skel_motions.view(-1, *gt_skel_motions.shape[2:]).permute(0, 2, 3, 1)
+ gt_skel_motions = motion_model.rot2xyz(x=gt_skel_motions, mask=None, pose_rep='xyz', glob=True, translation=True,
+ jointstype='smpl', vertstrans=True, betas=None, beta=0, glob_rot=None, get_rotations_back=False)
+ # gt_skel_motions shape [32, 22, 3, 196]
+ # # Visualize to make sure it is correct
+ # from utils.plot_script import plot_3d_motion
+ # plot_3d_motion("./test_positions_1.mp4", self.dataset.kinematic_chain,
+ # gt_skel_motions[0].permute(2,0,1).detach().cpu().numpy(), 'title', 'humanml', fps=20)
+
+ # Next, sample points, then prepare target and inpainting mask for trajectory model
+ ## Sample points
+ n_keyframe = 5
+ # reusing the target if it exists
+ target_batch_file = f'target_{i:04d}.pt'
+ target_batch_file = os.path.join(self.save_dir, target_batch_file)
+ if os.path.exists(target_batch_file):
+ # [batch_size, n_keyframe]
+ sampled_keyframes = torch.load(target_batch_file, map_location=motion.device)
+ print(f'sample keyframes {target_batch_file} exists, loading from file')
+ else:
+ sampled_keyframes = torch.rand(motion.shape[0], n_keyframe) * model_kwargs['y']['lengths'].unsqueeze(-1)
+                    # Floor to int because ceiling at 'lengths' would push the index out of bounds.
+                    # Keyframes may contain duplicates.
+ sampled_keyframes = torch.floor(sampled_keyframes).int().sort()[0] # shape [batch_size, n_keyframe]
+ torch.save(sampled_keyframes, target_batch_file)
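+                # sampled_keyframes holds, per sequence, n_keyframe frame indices drawn
+                # uniformly in [0, length); they are cached on disk so repeated evaluation
+                # runs condition on the same keyframes.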
+ # import pdb; pdb.set_trace()
+                ## Prepare target and mask for the gradient computation
+                # Prepare trajectory inpainting
+ (target, target_mask,
+ inpaint_traj, inpaint_traj_mask,
+ inpaint_traj_points, inpaint_traj_mask_points,
+ inpaint_motion, inpaint_mask,
+ inpaint_motion_points, inpaint_mask_points) = get_target_and_inpt_from_kframes_batch(gt_skel_motions, sampled_keyframes, self.dataset)
+
+ target = target.to(model_device)
+ target_mask = target_mask.to(model_device)
+ model_kwargs['y']['target'] = target
+ model_kwargs['y']['target_mask'] = target_mask
+ # target [32, 196, 22, 3] # in 3d skeleton
+ # inpaint [32, 4, 1, 196] # in model input space
+ ### End 1. preparing condition ###
+
+ mm_num_now = len(mm_generated_motions) // dataloader.batch_size
+ is_mm = i in mm_idxs
+ repeat_times = mm_num_repeats if is_mm else 1
+ mm_motions = []
+ mm_trajectories = []
+ for t in range(repeat_times):
+ seed_number = seed * 100_000 + i * 100 + t
+ fixseed(seed_number)
+ batch_file = f'{i:04d}_{t:02d}.pt'
+ batch_path = os.path.join(self.save_dir, batch_file)
+
+ # reusing the batch if it exists
+ if os.path.exists(batch_path):
+ # [bs, njoints, nfeat, seqlen]
+ sample_motion = torch.load(batch_path, map_location=motion.device)
+ print(f'batch {batch_file} exists, loading from file')
+ else:
+ print(f'working on {batch_file}')
+ # for smoother motions
+ impute_slack = 20
+ # NOTE: For debugging
+ traj_model_kwargs['y']['log_name'] = self.save_dir
+ traj_model_kwargs['y']['log_id'] = i
+ model_kwargs['y']['log_name'] = self.save_dir
+ model_kwargs['y']['log_id'] = i
+                        # the motion model always imputes until 20
+ model_kwargs['y']['cond_until'] = impute_slack
+ model_kwargs['y']['impute_until'] = impute_slack
+
+ if skip_first_stage:
+ # No first stage. Skip straight to second stage
+ ### Add motion to inpaint
+ # import pdb; pdb.set_trace()
+ # del model_kwargs['y']['inpainted_motion']
+ # del model_kwargs['y']['inpainting_mask']
+ model_kwargs['y']['inpainted_motion'] = inpaint_motion.to(model_device) # init_motion.to(model_device)
+ model_kwargs['y']['inpainting_mask'] = inpaint_mask.to(model_device)
+
+ model_kwargs['y']['inpainted_motion_second_stage'] = inpaint_motion_points.to(model_device)
+ model_kwargs['y']['inpainting_mask_second_stage'] = inpaint_mask_points.to(model_device)
+ # import pdb; pdb.set_trace()
+
+ # For classifier-free
+ CLASSIFIER_FREE = True
+ if CLASSIFIER_FREE:
+ impute_until = 1
+ impute_slack = 20
+ # del model_kwargs['y']['inpainted_motion']
+ # del model_kwargs['y']['inpainting_mask']
+ model_kwargs['y']['inpainted_motion'] = inpaint_motion_points.to(model_device) # init_motion.to(model_device)
+ model_kwargs['y']['inpainting_mask'] = inpaint_mask_points.to(model_device)
+
+ # Set when to stop imputing
+ model_kwargs['y']['cond_until'] = impute_slack
+ model_kwargs['y']['impute_until'] = impute_until
+ model_kwargs['y']['impute_until_second_stage'] = impute_slack
+
+ else:
+ ### Add motion to inpaint
+ traj_model_kwargs['y']['inpainted_motion'] = inpaint_traj.to(model_device) # init_motion.to(model_device)
+ traj_model_kwargs['y']['inpainting_mask'] = inpaint_traj_mask.to(model_device)
+
+ # Set when to stop imputing
+ traj_model_kwargs['y']['cond_until'] = impute_slack
+ traj_model_kwargs['y']['impute_until'] = impute_until
+ # NOTE: We have the option of switching the target motion from line to just key locations
+ # We call this a 'second stage', which starts after t reaches 'impute_until'
+ traj_model_kwargs['y']['impute_until_second_stage'] = impute_slack
+ traj_model_kwargs['y']['inpainted_motion_second_stage'] = inpaint_traj_points.to(model_device)
+ traj_model_kwargs['y']['inpainting_mask_second_stage'] = inpaint_traj_mask_points.to(model_device)
+
+
+ ##########################################################
+ # print("************* Test: not using dense gradient ****************")
+ # NO_GRAD = True
+ # traj_model_kwargs['y']['cond_until'] = 1000
+
+ # traj_model_kwargs['y']['impute_until'] = 1000
+ # traj_model_kwargs['y']['impute_until_second_stage'] = 0
+
+ ##########################################################
+
+ ### Generate trajectory
+ # [bs, njoints, nfeat, seqlen]
+ # NOTE: add cond_fn
+ sample_traj = sample_fn_traj(
+ traj_model,
+ inpaint_traj.shape,
+ clip_denoised=clip_denoised,
+ model_kwargs=traj_model_kwargs, # <-- trajectory kwargs
+ skip_timesteps=0, # NOTE: for debugging, start from 900
+ init_image=None,
+ progress=True,
+ dump_steps=None,
+ noise=None,
+ const_noise=False,
+ cond_fn=partial(
+ cond_fn_key_location, # cond_fn_sdf, #,
+ transform=self.dataset.t2m_dataset.transform_th,
+ inv_transform=self.dataset.t2m_dataset.inv_transform_th,
+ target=target,
+ target_mask=target_mask,
+ kframes=[],
+ abs_3d=abs_3d, # <<-- hard code,
+ classifiler_scale=trajectory_classifier_scale,
+ use_mse_loss=False), # <<-- hard code
+ )
+
+ ### Prepare conditions for motion from generated trajectory ###
+ # Get inpainting information for motion model
+ traj_motion, traj_mask = get_inpainting_motion_from_traj(
+ sample_traj, inv_transform_fn=self.dataset.t2m_dataset.inv_transform_th)
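+ # traj_motion/traj_mask presumably pack the generated root trajectory into the motion model's input layout, with the mask selecting only the trajectory channels to inpaint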
+ # Get target for loss grad
+ # Target has dimensions [bs, max_motion_length, 22, 3]
+ target = torch.zeros([motion.shape[0], max_motion_length, 22, 3], device=traj_motion.device)
+ target_mask = torch.zeros_like(target, dtype=torch.bool)
+ # This assumes that traj_motion is in 3D space without normalization
+ # traj_motion: [3, 263, 1, 196]
+ target[:, :, 0, [0, 2]] = traj_motion.permute(0, 3, 2, 1)[:, :, 0,[1, 2]]
+ target_mask[:, :, 0, [0, 2]] = True
+ # Set imputing trajectory
+ model_kwargs['y']['inpainted_motion'] = traj_motion
+ model_kwargs['y']['inpainting_mask'] = traj_mask
+ ### End - Prepare conditions ###
+
+ # import pdb; pdb.set_trace()
+
+ ### Generate motion
+ # NOTE: add cond_fn
+ # TODO: move the following to a separate function
+ if guidance_mode == "kps" or guidance_mode == "trajectory":
+ cond_fn = CondKeyLocations(target=target,
+ target_mask=target_mask,
+ transform=self.dataset.t2m_dataset.transform_th,
+ inv_transform=self.dataset.t2m_dataset.inv_transform_th,
+ abs_3d=abs_3d,
+ classifiler_scale=motion_classifier_scale,
+ use_mse_loss=False,
+ use_rand_projection=self.dataset.use_random_proj
+ )
+ # elif guidance_mode == "sdf":
+ # cond_fn = CondKeyLocationsWithSdf(target=target,
+ # target_mask=target_mask,
+ # transform=data.dataset.t2m_dataset.transform_th,
+ # inv_transform=data.dataset.t2m_dataset.inv_transform_th,
+ # abs_3d=abs_3d,
+ # classifiler_scale=motion_classifier_scale,
+ # use_mse_loss=False,
+ # use_rand_projection=self.dataset.use_random_proj,
+ # obs_list=obs_list
+ # )
+ elif guidance_mode == "no" or guidance_mode == "mdm_legacy":
+ cond_fn = None
+
+ # if NO_GRAD:
+ # cond_fn = None
+
+ sample_motion = sample_fn_motion(
+ motion_model,
+ (motion.shape[0], motion_model.njoints, motion_model.nfeats, motion.shape[3]), # motion.shape
+ clip_denoised=clip_denoised,
+ model_kwargs=model_kwargs,
+ skip_timesteps=0,
+ init_image=None,
+ progress=True,
+ dump_steps=None,
+ noise=None,
+ const_noise=False,
+ cond_fn=cond_fn
+ # partial(
+ # cond_fn_key_location,
+ # transform=self.dataset.t2m_dataset.transform_th,
+ # inv_transform=self.dataset.t2m_dataset.inv_transform_th,
+ # target=target,
+ # target_mask=target_mask,
+ # kframes=[],
+ # abs_3d=True, # <<-- hard code,
+ # classifiler_scale=motion_classifier_scale,
+ # use_mse_loss=False), # <<-- hard code
+ )
+ # save to file
+ torch.save(sample_motion, batch_path)
+
+
+ # print('cut the motion length from {} to {}'.format(sample_motion.shape[-1], self.max_motion_length))
+ sample = sample_motion[:, :, :, :self.max_motion_length]
+
+ # Compute error for key xz locations
+ cur_motion = sample_to_motion(sample, self.dataset, motion_model)
+ kps_error = compute_kps_error(cur_motion, gt_skel_motions, sampled_keyframes) # [batch_size, 5] in meters
+ skate_ratio, skate_vel = calculate_skating_ratio(cur_motion) # [batch_size]
+ # import pdb; pdb.set_trace()
+ # We can get the trajectory from here. Get only root xz from motion
+ cur_traj = cur_motion[:, 0, [0, 2], :]
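+ # cur_traj: root joint (index 0) x and z coordinates over time, shape [batch_size, 2, n_frames]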
+
+ # NOTE: To test if the motion is reasonable or not
+ if log_motion:
+ from utils.plot_script import plot_3d_motion
+ for j in tqdm([1, 3, 4, 5], desc="generating motion"):
+ motion_id = f'{i:04d}_{t:02d}_{j:02d}'
+ plot_3d_motion(os.path.join(self.save_dir, f"motion_cond_{motion_id}.mp4"), self.dataset.kinematic_chain,
+ cur_motion[j].permute(2,0,1).detach().cpu().numpy(), 'title', 'humanml', fps=20)
+
+ if self.dataset.absolute_3d:
+ # NOTE: Changing the output from absolute space to the relative space here.
+ # The easiest way to do this is to go all the way to skeleton and convert back again.
+ # sample shape [32, 263, 1, 196]
+ sample = abs3d_to_rel(sample, self.dataset, motion_model)
+
+ if t == 0:
+ sub_dicts = [{'motion': sample[bs_i].squeeze().permute(1,0).cpu().numpy(),
+ 'length': model_kwargs['y']['lengths'][bs_i].cpu().numpy(),
+ 'caption': model_kwargs['y']['text'][bs_i],
+ 'tokens': tokens[bs_i],
+ 'cap_len': len(tokens[bs_i]),
+ 'dist_error': kps_error[bs_i].cpu().numpy(),
+ 'skate_ratio': skate_ratio[bs_i],
+ } for bs_i in range(dataloader.batch_size)]
+ generated_motion += sub_dicts
+
+ if is_mm:
+ mm_motions += [{'motion': sample[bs_i].squeeze().permute(1, 0).cpu().numpy(),
+ 'length': model_kwargs['y']['lengths'][bs_i].cpu().numpy(),
+ 'traj': cur_traj[bs_i].squeeze().permute(1, 0).cpu().numpy(),
+ } for bs_i in range(dataloader.batch_size)]
+ # import pdb; pdb.set_trace()
+
+ if is_mm:
+ mm_generated_motions += [{
+ 'caption': model_kwargs['y']['text'][bs_i],
+ 'tokens': tokens[bs_i],
+ 'cap_len': len(tokens[bs_i]),
+ 'mm_motions': mm_motions[bs_i::dataloader.batch_size], # collect all 10 repeats from the (32*10) generated motions
+ } for bs_i in range(dataloader.batch_size)]
+
+
+ self.generated_motion = generated_motion
+ self.mm_generated_motion = mm_generated_motions
+ self.w_vectorizer = dataloader.dataset.w_vectorizer
+
+
+ def __len__(self):
+ return len(self.generated_motion)
+
+
+ def __getitem__(self, item):
+ data = self.generated_motion[item]
+ motion, m_length, caption, tokens = data['motion'], data['length'], data['caption'], data['tokens']
+ dist_error = data['dist_error']
+ skate_ratio = data['skate_ratio']
+ sent_len = data['cap_len']
+
+ if self.dataset.mode == 'eval':
+ normed_motion = motion
+ if self.dataset.absolute_3d:
+ # Denormalize with the relative mean/std because inv_transform() uses the absolute mean and std
+ # The motion is already converted to relative after inference
+ # import pdb; pdb.set_trace()
+ denormed_motion = (normed_motion * self.dataset.std_rel) + self.dataset.mean_rel
+ else:
+ denormed_motion = self.dataset.t2m_dataset.inv_transform(normed_motion)
+ renormed_motion = (denormed_motion - self.dataset.mean_for_eval) / self.dataset.std_for_eval # according to T2M norms
+ motion = renormed_motion
+ # This step is needed because T2M evaluators expect their norm convention
+
+ pos_one_hots = []
+ word_embeddings = []
+ for token in tokens:
+ word_emb, pos_oh = self.w_vectorizer[token]
+ pos_one_hots.append(pos_oh[None, :])
+ word_embeddings.append(word_emb[None, :])
+ pos_one_hots = np.concatenate(pos_one_hots, axis=0)
+ word_embeddings = np.concatenate(word_embeddings, axis=0)
+
+ return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, '_'.join(tokens), dist_error, skate_ratio
+
+
+# Data class for generated motion by *inpainting full trajectory*
+class CompMDMGeneratedDatasetInpainting(Dataset):
+
+ def __init__(self, model, diffusion, dataloader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale=1., save_dir=None, seed=None):
+ assert seed is not None, "seed must be provided"
+ self.dataloader = dataloader
+ self.dataset = dataloader.dataset
+ self.save_dir = save_dir
+ assert save_dir is not None
+ assert mm_num_samples < len(dataloader.dataset)
+
+ # create the target directory
+ os.makedirs(self.save_dir, exist_ok=True)
+
+ # Settings
+ motion_classifier_scale = 100.0
+ print("motion classifier scale", motion_classifier_scale)
+ log_motion = False # False
+
+ model_device = next(model.parameters()).device
+ diffusion.data_get_mean_fn = self.dataset.t2m_dataset.get_std_mean
+ diffusion.data_transform_fn = self.dataset.t2m_dataset.transform_th
+ diffusion.data_inv_transform_fn = self.dataset.t2m_dataset.inv_transform_th
+ if log_motion:
+ diffusion.log_trajectory_fn = partial(
+ log_trajectory_from_xstart,
+ kframes=[],
+ inv_transform=self.dataset.t2m_dataset.inv_transform_th,
+ abs_3d=True, # <--- assume the motion model is absolute
+ use_rand_proj=self.dataset.use_rand_proj,
+ traject_only=False,
+ n_frames=max_motion_length)
+
+ use_ddim = False # FIXME - hardcoded
+ # NOTE: I have updated the code in gaussian_diffusion.py so that it does not clip the denoised output for x-start models.
+ # Hence, clip_denoised is always set to True.
+ clip_denoised = True
+ self.max_motion_length = max_motion_length
+ sample_fn = (
+ diffusion.p_sample_loop if not use_ddim else diffusion.ddim_sample_loop
+ )
+
+ real_num_batches = len(dataloader)
+ if num_samples_limit is not None:
+ real_num_batches = num_samples_limit // dataloader.batch_size + 1
+ print('real_num_batches', real_num_batches)
+
+ generated_motion = []
+ # NOTE: mm = multi-modal
+ mm_generated_motions = []
+ if mm_num_samples > 0:
+ mm_idxs = np.random.choice(real_num_batches, mm_num_samples // dataloader.batch_size +1, replace=False)
+ mm_idxs = np.sort(mm_idxs)
+ else:
+ mm_idxs = []
+ print('mm_idxs', mm_idxs)
+ model.eval()
+
+ with torch.no_grad():
+ for i, (motion, model_kwargs) in tqdm(enumerate(dataloader)):
+
+ if num_samples_limit is not None and len(generated_motion) >= num_samples_limit:
+ break
+
+ tokens = [t.split('_') for t in model_kwargs['y']['tokens']]
+
+ # add CFG scale to batch
+ if scale != 1.:
+ model_kwargs['y']['scale'] = torch.ones(motion.shape[0],
+ device=dist_util.dev()) * scale
+
+ model_kwargs['y']['log_name'] = self.save_dir
+ ### 1. Prepare motion for conditioning ###
+ model_kwargs['y']['traj_model'] = False
+ model_kwargs['y']['log_id'] = i
+ # Convert to 3D motion space
+ # NOTE: the 'motion' will not be randomly projected if the dataset mode is 'eval' or 'gt',
+ # even if 'self.dataset.t2m_dataset.use_rand_proj' is True
+ gt_poses = motion.permute(0, 2, 3, 1)
+ gt_poses = gt_poses * self.dataset.std + self.dataset.mean # [bs, 1, 196, 263]
+ # (x,y,z) [bs, 1, 120, njoints=22, nfeat=3]
+ gt_skel_motions = recover_from_ric(gt_poses.float(), 22, abs_3d=False)
+ gt_skel_motions = gt_skel_motions.view(-1, *gt_skel_motions.shape[2:]).permute(0, 2, 3, 1)
+ gt_skel_motions = model.rot2xyz(x=gt_skel_motions, mask=None, pose_rep='xyz', glob=True, translation=True,
+ jointstype='smpl', vertstrans=True, betas=None, beta=0, glob_rot=None, get_rotations_back=False)
+ # gt_skel_motions shape [32, 22, 3, 196]
+ # # Visualize to make sure it is correct
+ # from utils.plot_script import plot_3d_motion
+ # plot_3d_motion("./test_positions_1.mp4", self.dataset.kinematic_chain,
+ # gt_skel_motions[0].permute(2,0,1).detach().cpu().numpy(), 'title', 'humanml', fps=20)
+
+ ## Prepare target and mask for grad cal
+ inpaint_motion, inpaint_mask, target, target_mask = get_inpainting_motion_from_gt(
+ gt_skel_motions, dataloader.batch_size, model_device, model_kwargs['y']['lengths'],
+ inv_transform_fn=self.dataset.t2m_dataset.inv_transform_th)
+ model_kwargs['y']['target'] = target
+ model_kwargs['y']['target_mask'] = target_mask
+ # target [32, 196, 22, 3] # in 3d skeleton
+ # inpaint [32, 263, 1, 196] # in model input space
+ ### End 1. preparing condition ###
+
+ mm_num_now = len(mm_generated_motions) // dataloader.batch_size
+ is_mm = i in mm_idxs
+ repeat_times = mm_num_repeats if is_mm else 1
+ mm_motions = []
+ for t in range(repeat_times):
+ # setting the seed here makes sure the same seed is used even when continuing from unfinished runs
+ seed_number = seed * 100_000 + i * 100 + t
+ fixseed(seed_number)
+
+ batch_file = f'{i:04d}_{t:02d}.pt'
+ batch_path = os.path.join(self.save_dir, batch_file)
+
+ # reusing the batch if it exists
+ if os.path.exists(batch_path):
+ # [bs, njoints, nfeat, seqlen]
+ sample = torch.load(batch_path, map_location=motion.device)
+ print(f'batch {batch_file} exists, loading from file')
+ else:
+ # Set inpainting information
+ model_kwargs['y']['inpainted_motion'] = inpaint_motion.to(model_device)
+ model_kwargs['y']['inpainting_mask'] = inpaint_mask.to(model_device)
+ # Set when to stop imputing
+ model_kwargs['y']['impute_until'] = 0
+ model_kwargs['y']['cond_until'] = 0
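+ # assumption: 0 means keep imputing/conditioning through the final denoising step (never stop early)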
+
+ # [bs, njoints, nfeat, seqlen]
+ do_optimize = False
+ if do_optimize:
+ cond_fn = partial(
+ cond_fn_key_location,
+ transform=self.dataset.t2m_dataset.transform_th,
+ inv_transform=self.dataset.t2m_dataset.inv_transform_th,
+ target=target,
+ target_mask=target_mask,
+ kframes=[],
+ abs_3d=True, # <<-- hard code,
+ classifiler_scale=motion_classifier_scale,
+ use_mse_loss=False) # <<-- hard code
+ else:
+ cond_fn = None
+ sample = sample_fn(
+ model,
+ (motion.shape[0], model.njoints, model.nfeats, motion.shape[3]), # motion.shape
+ clip_denoised=clip_denoised,
+ model_kwargs=model_kwargs,
+ skip_timesteps=0, # 0 is the default value - i.e. don't skip any step
+ init_image=None,
+ progress=True,
+ dump_steps=None,
+ noise=None,
+ const_noise=False,
+ cond_fn=cond_fn,
+ )
+ # save to file
+ torch.save(sample, batch_path)
+
+ # print('cut the motion length from {} to {}'.format(sample.shape[-1], self.max_motion_length))
+ sample = sample[:, :, :, :self.max_motion_length]
+ cur_motion = sample_to_motion(sample, self.dataset, model)
+ skate_ratio, skate_vel = calculate_skating_ratio(cur_motion) # [batch_size]
+
+ # NOTE: To test if the motion is reasonable or not
+ if log_motion:
+
+ from utils.plot_script import plot_3d_motion
+ for j in tqdm([1, 3, 4, 5], desc="generating motion"):
+ motion_id = f'{i:04d}_{t:02d}_{j:02d}'
+ plot_3d_motion(os.path.join(self.save_dir, f"motion_cond_{motion_id}.mp4"), self.dataset.kinematic_chain,
+ cur_motion[j].permute(2,0,1).detach().cpu().numpy(), 'title', 'humanml', fps=20)
+
+ if self.dataset.absolute_3d:
+ # NOTE: Changing the output from absolute space to the relative space here.
+ # The easiest way to do this is to go all the way to skeleton and convert back again.
+ # sample shape [32, 263, 1, 196]
+ sample = abs3d_to_rel(sample, self.dataset, model)
+
+ if t == 0:
+ sub_dicts = [{'motion': sample[bs_i].squeeze().permute(1,0).cpu().numpy(),
+ 'length': model_kwargs['y']['lengths'][bs_i].cpu().numpy(),
+ 'caption': model_kwargs['y']['text'][bs_i],
+ 'tokens': tokens[bs_i],
+ 'cap_len': len(tokens[bs_i]),
+ 'skate_ratio': skate_ratio[bs_i],
+ } for bs_i in range(dataloader.batch_size)]
+ generated_motion += sub_dicts
+
+ if is_mm:
+ mm_motions += [{'motion': sample[bs_i].squeeze().permute(1, 0).cpu().numpy(),
+ 'length': model_kwargs['y']['lengths'][bs_i].cpu().numpy(),
+ } for bs_i in range(dataloader.batch_size)]
+
+ if is_mm:
+ mm_generated_motions += [{
+ 'caption': model_kwargs['y']['text'][bs_i],
+ 'tokens': tokens[bs_i],
+ 'cap_len': len(tokens[bs_i]),
+ 'mm_motions': mm_motions[bs_i::dataloader.batch_size], # collect all 10 repeats from the (32*10) generated motions
+ } for bs_i in range(dataloader.batch_size)]
+
+
+ self.generated_motion = generated_motion
+ self.mm_generated_motion = mm_generated_motions
+ self.w_vectorizer = dataloader.dataset.w_vectorizer
+
+
+ def __len__(self):
+ return len(self.generated_motion)
+
+
+ def __getitem__(self, item):
+ data = self.generated_motion[item]
+ motion, m_length, caption, tokens = data['motion'], data['length'], data['caption'], data['tokens']
+ sent_len = data['cap_len']
+ skate_ratio = data['skate_ratio']
+
+ if self.dataset.mode == 'eval':
+ normed_motion = motion
+ if self.dataset.absolute_3d:
+ # Denormalize with the relative mean/std because inv_transform() uses the absolute mean and std
+ # The motion is already converted to relative after inference
+ denormed_motion = (normed_motion * self.dataset.std_rel) + self.dataset.mean_rel
+ else:
+ denormed_motion = self.dataset.t2m_dataset.inv_transform(normed_motion)
+ renormed_motion = (denormed_motion - self.dataset.mean_for_eval) / self.dataset.std_for_eval # according to T2M norms
+ motion = renormed_motion
+ # This step is needed because T2M evaluators expect their norm convention
+
+ pos_one_hots = []
+ word_embeddings = []
+ for token in tokens:
+ word_emb, pos_oh = self.w_vectorizer[token]
+ pos_one_hots.append(pos_oh[None, :])
+ word_embeddings.append(word_emb[None, :])
+ pos_one_hots = np.concatenate(pos_one_hots, axis=0)
+ word_embeddings = np.concatenate(word_embeddings, axis=0)
+
+ return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, '_'.join(tokens), skate_ratio
diff --git a/data_loaders/custom/motion_loaders/comp_v6_model_dataset_condmdi.py b/data_loaders/custom/motion_loaders/comp_v6_model_dataset_condmdi.py
new file mode 100644
index 0000000..05438d8
--- /dev/null
+++ b/data_loaders/custom/motion_loaders/comp_v6_model_dataset_condmdi.py
@@ -0,0 +1,565 @@
+import torch
+from utils.fixseed import fixseed
+from networks.modules import *
+from networks.trainers import CompTrainerV6
+from torch.utils.data import Dataset, DataLoader
+from os.path import join as pjoin
+from tqdm import tqdm
+from utils import dist_util
+import os
+import copy
+from functools import partial
+
+from data_loaders.custom.data.dataset import abs3d_to_rel, sample_to_motion, rel_to_abs3d
+from scripts.motion_process import recover_from_ric
+from utils.metrics import calculate_skating_ratio
+from sample.gmd.condition import (cond_fn_key_location, get_target_from_kframes, get_target_and_inpt_from_kframes_batch,
+ log_trajectory_from_xstart, get_inpainting_motion_from_traj, get_inpainting_motion_from_gt,
+ cond_fn_key_location, compute_kps_error, cond_fn_sdf,
+ CondKeyLocations, CondKeyLocationsWithSdf, compute_kps_error_arbitrary)
+from utils.editing_util import get_keyframes_mask
+
+
+# Data class for generated motion by *conditioning*
+class CompMDMGeneratedDatasetCondMDI(Dataset):
+
+ def __init__(self, model_dict, diffusion_dict, dataloader, mm_num_samples, mm_num_repeats,
+ max_motion_length, num_samples_limit, text_scale=1., keyframe_scale=1., save_dir=None, impute_until=0, skip_first_stage=False,
+ seed=None, use_ddim=False, args=None):
+
+ assert seed is not None, "must provide seed"
+ self.args = args
+ self.dataloader = dataloader
+ self.dataset = dataloader.dataset
+ self.save_dir = save_dir
+ # This affects the trajectory model if we do two-stage sampling; otherwise it affects the motion model.
+ # For the trajectory model, the output trajectory will be imputed until timestep 20 (set by impute_slack).
+ self.impute_until = impute_until
+
+ motion_model, traj_model = model_dict["motion"], model_dict["traj"]
+ motion_diffusion, traj_diffusion = diffusion_dict["motion"], diffusion_dict["traj"]
+
+ ### Basic settings
+ # motion_classifier_scale = 100.0
+ # print("motion classifier scale", motion_classifier_scale)
+ log_motion = False
+ # guidance_mode = 'no'
+ abs_3d = True
+ use_random_proj = self.dataset.use_rand_proj
+ # print("guidance mode", guidance_mode)
+ print("use ddim", use_ddim)
+
+ model_device = next(motion_model.parameters()).device
+ motion_diffusion.data_get_mean_fn = self.dataset.t2m_dataset.get_std_mean
+ motion_diffusion.data_transform_fn = self.dataset.t2m_dataset.transform_th
+ motion_diffusion.data_inv_transform_fn = self.dataset.t2m_dataset.inv_transform_th
+ if log_motion:
+ motion_diffusion.log_trajectory_fn = partial(
+ log_trajectory_from_xstart,
+ kframes=[],
+ inv_transform=self.dataset.t2m_dataset.inv_transform_th,
+ abs_3d=abs_3d, # <--- assume the motion model is absolute
+ use_rand_proj=self.dataset.use_rand_proj,
+ traject_only=False,
+ n_frames=max_motion_length)
+
+ assert save_dir is not None
+ assert mm_num_samples < len(dataloader.dataset)
+
+ # create the target directory
+ os.makedirs(self.save_dir, exist_ok=True)
+
+ # use_ddim = False # FIXME - hardcoded
+ # NOTE: I have updated the code in gaussian_diffusion.py so that it does not clip the denoised output for x-start models.
+ # Hence, clip_denoised is always set to True.
+ # clip_denoised = True
+ self.max_motion_length = max_motion_length
+
+ # sample_fn_motion = (
+ # motion_diffusion.p_sample_loop if not use_ddim else motion_diffusion.ddim_sample_loop
+ # )
+
+ real_num_batches = len(dataloader)
+ if num_samples_limit is not None:
+ real_num_batches = num_samples_limit // dataloader.batch_size + 1
+ print('real_num_batches', real_num_batches)
+
+ generated_motion = []
+ # NOTE: mm = multi-modal
+ mm_generated_motions = []
+ if mm_num_samples > 0:
+ mm_idxs = np.random.choice(real_num_batches, mm_num_samples // dataloader.batch_size +1, replace=False)
+ mm_idxs = np.sort(mm_idxs)
+ else:
+ mm_idxs = []
+ print('mm_idxs', mm_idxs)
+
+ motion_model.eval()
+
+ with torch.no_grad():
+ for i, (motion, model_kwargs) in tqdm(enumerate(dataloader)):
+ '''For each datapoint, we do the following
+ 1. Sample 3-10 (?) points from the ground truth trajectory to be used as conditions
+ 2. Generate trajectory with trajectory model
+ 3. Generate motion based on the generated traj using inpainting and cond_fn.
+ '''
+
+ if num_samples_limit is not None and len(generated_motion) >= num_samples_limit:
+ break
+
+ tokens = [t.split('_') for t in model_kwargs['y']['tokens']]
+
+ # add CFG scale to batch
+ if args.guidance_param != 1:
+ # text classifier-free guidance
+ model_kwargs['y']['text_scale'] = torch.ones(motion.shape[0], device=dist_util.dev()) * text_scale
+ if args.keyframe_guidance_param != 1:
+ # keyframe classifier-free guidance
+ model_kwargs['y']['keyframe_scale'] = torch.ones(motion.shape[0], device=dist_util.dev()) * keyframe_scale
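+ # two independent classifier-free guidance scales (per the argument names): one for the text condition and one for the keyframe condition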
+
+ ### 1. Prepare motion for conditioning ###
+ model_kwargs['y']['traj_model'] = False
+
+ # Convert to 3D motion space
+ # NOTE: the 'motion' will not be randomly projected if the dataset mode is 'eval' or 'gt',
+ # even if 'self.dataset.t2m_dataset.use_rand_proj' is True
+ # NOTE: the 'motion' will have the relative representation if the dataset mode is 'eval' or 'gt',
+ # even if 'self.dataset.t2m_dataset.use_abs3d' is True
+ gt_poses = motion.permute(0, 2, 3, 1)
+ gt_poses = gt_poses * self.dataset.std + self.dataset.mean # [bs, 1, 196, 263] # TODO: mean and std here are the absolute mean and std, and this is done on purpose. Why? The 'eval' mode is used because we want inv_transform to work the same way at inference for models with abs3d, regardless of which dataset is loaded.
+ # TODO: gt_poses = gt_poses * self.dataset.std_rel + self.dataset.mean_rel
+ # (x,y,z) [bs, 1, 120, njoints=22, nfeat=3]
+ gt_skel_motions = recover_from_ric(gt_poses.float(), 22, abs_3d=False)
+ gt_skel_motions = gt_skel_motions.view(-1, *gt_skel_motions.shape[2:]).permute(0, 2, 3, 1)
+ gt_skel_motions = motion_model.rot2xyz(x=gt_skel_motions, mask=None, pose_rep='xyz', glob=True, translation=True,
+ jointstype='smpl', vertstrans=True, betas=None, beta=0, glob_rot=None, get_rotations_back=False)
+ # gt_skel_motions shape [32, 22, 3, 196]
+ # Visualize to make sure it is correct
+ # from utils.plot_script import plot_3d_motion
+ # plot_3d_motion("./gt_source_abs.mp4", self.dataset.kinematic_chain,
+ # gt_skel_motions[0].permute(2,0,1).detach().cpu().numpy(), 'title', 'humanml', fps=20)
+
+
+ ### START TEST ###
+ # gt_poses = motion.permute(0, 2, 3, 1)
+ # gt_poses = gt_poses * self.dataset.std_rel + self.dataset.mean_rel
+ # # (x,y,z) [bs, 1, 120, njoints=22, nfeat=3]
+ # gt_skel_motions = recover_from_ric(gt_poses.float(), 22, abs_3d=False)
+ # gt_skel_motions = gt_skel_motions.view(-1, *gt_skel_motions.shape[2:]).permute(0, 2, 3, 1)
+ # gt_skel_motions = motion_model.rot2xyz(x=gt_skel_motions, mask=None, pose_rep='xyz', glob=True, translation=True,
+ # jointstype='smpl', vertstrans=True, betas=None, beta=0, glob_rot=None, get_rotations_back=False)
+ # # gt_skel_motions shape [32, 22, 3, 196]
+ # # Visualize to make sure it is correct
+ # from utils.plot_script import plot_3d_motion
+ # plot_3d_motion("./gt_source_rel.mp4", self.dataset.kinematic_chain,
+ # gt_skel_motions[0].permute(2,0,1).detach().cpu().numpy(), 'title', 'humanml', fps=20)
+ # Sample gt_source_abs.mp4 looks better
+ ### END TEST ###
+
+ # Convert relative representation to absolute representation for ground-truth motions
+ motion_abs = rel_to_abs3d(sample_rel=motion, dataset=self.dataset, model=motion_model).to(dist_util.dev())
+ ### START TEST ###
+ # Visualize to make sure it is correct
+ # gt_poses = model_kwargs['y']['inpainted_motion'].permute(0, 2, 3, 1)
+ # gt_poses = gt_poses * self.dataset.std + self.dataset.mean # [bs, 1, 196, 263]
+ # # (x,y,z) [bs, 1, 120, njoints=22, nfeat=3]
+ # gt_skel_motions = recover_from_ric(gt_poses.float(), 22, abs_3d=True)
+ # gt_skel_motions = gt_skel_motions.view(-1, *gt_skel_motions.shape[2:]).permute(0, 2, 3, 1)
+ # gt_skel_motions = motion_model.rot2xyz(x=gt_skel_motions, mask=None, pose_rep='xyz', glob=True, translation=True,
+ # jointstype='smpl', vertstrans=True, betas=None, beta=0, glob_rot=None, get_rotations_back=False)
+ # from utils.plot_script import plot_3d_motion
+ # plot_3d_motion("./test_rel2glob_gt.mp4", self.dataset.kinematic_chain,
+ # gt_skel_motions[0].permute(2,0,1).detach().cpu().numpy(), 'title', 'humanml', fps=20)
+ # Sample matches gt_source_abs.mp4
+ ### END TEST ###
+
+ # import pdb; pdb.set_trace()
+
+ ### START OUR BUILD OF model_kwargs ###
+ if motion_model.keyframe_conditioned:
+ # Conditional synthesis arguments:
+ keyframes_indices, joint_mask = self.set_conditional_synthesis_args(model_kwargs, motion_abs)
+ elif self.args.imputate or self.args.reconstruction_guidance:
+ # Editing arguments:
+ keyframes_indices, joint_mask = self.set_inference_editing_args(model_kwargs, motion_abs)
+ ### END OUR BUILD OF model_kwargs ###
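+ # keyframes_indices: per-sample list of observed frame indices (used for the keyframe error metrics below); joint_mask: the corresponding per-joint observation mask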
+
+ mm_num_now = len(mm_generated_motions) // dataloader.batch_size
+ is_mm = i in mm_idxs
+ repeat_times = mm_num_repeats if is_mm else 1
+ mm_motions = []
+ mm_trajectories = []
+ for t in range(repeat_times):
+ seed_number = seed * 100_000 + i * 100 + t
+ fixseed(seed_number)
+ batch_file = f'{i:04d}_{t:02d}.pt'
+ batch_path = os.path.join(self.save_dir, batch_file)
+
+ # reusing the batch if it exists
+ # if os.path.exists(batch_path):
+ if False: # GUY - IGNORE CACHE FOR NOW
+ # [bs, njoints, nfeat, seqlen]
+ sample_motion = torch.load(batch_path, map_location=motion.device)
+ print(f'batch {batch_file} exists, loading from file')
+ else:
+ print(f'working on {batch_file}')
+ # for smoother motions
+ # impute_slack = 20
+ # NOTE: For debugging
+ # traj_model_kwargs['y']['log_name'] = self.save_dir
+ # traj_model_kwargs['y']['log_id'] = i
+ model_kwargs['y']['log_name'] = self.save_dir
+ model_kwargs['y']['log_id'] = i
+ # motion model always impute until 20
+ # model_kwargs['y']['cond_until'] = impute_slack
+ # model_kwargs['y']['impute_until'] = impute_slack
+
+ # if skip_first_stage:
+ # # No first stage. Skip straight to second stage
+ # ### Add motion to inpaint
+ # # import pdb; pdb.set_trace()
+ # # del model_kwargs['y']['inpainted_motion']
+ # # del model_kwargs['y']['inpainting_mask']
+ # model_kwargs['y']['inpainted_motion'] = inpaint_motion.to(model_device) # init_motion.to(model_device)
+ # model_kwargs['y']['inpainting_mask'] = inpaint_mask.to(model_device)
+ #
+ # model_kwargs['y']['inpainted_motion_second_stage'] = inpaint_motion_points.to(model_device)
+ # model_kwargs['y']['inpainting_mask_second_stage'] = inpaint_mask_points.to(model_device)
+ # # import pdb; pdb.set_trace()
+ #
+ # # For classifier-free
+ # CLASSIFIER_FREE = True
+ # if CLASSIFIER_FREE:
+ # impute_until = 1
+ # impute_slack = 20
+ # # del model_kwargs['y']['inpainted_motion']
+ # # del model_kwargs['y']['inpainting_mask']
+ # model_kwargs['y']['inpainted_motion'] = inpaint_motion_points.to(model_device) # init_motion.to(model_device)
+ # model_kwargs['y']['inpainting_mask'] = inpaint_mask_points.to(model_device)
+ #
+ # # Set when to stop imputing
+ # model_kwargs['y']['cond_until'] = impute_slack
+ # model_kwargs['y']['impute_until'] = impute_until
+ # model_kwargs['y']['impute_until_second_stage'] = impute_slack
+ #
+ # else:
+ # ### Add motion to inpaint
+ # traj_model_kwargs['y']['inpainted_motion'] = inpaint_traj.to(model_device) # init_motion.to(model_device)
+ # traj_model_kwargs['y']['inpainting_mask'] = inpaint_traj_mask.to(model_device)
+ #
+ # # Set when to stop imputing
+ # traj_model_kwargs['y']['cond_until'] = impute_slack
+ # traj_model_kwargs['y']['impute_until'] = impute_until
+ # # NOTE: We have the option of switching the target motion from line to just key locations
+ # # We call this a 'second stage', which will start after t reach 'impute_until'
+ # traj_model_kwargs['y']['impute_until_second_stage'] = impute_slack
+ # traj_model_kwargs['y']['inpainted_motion_second_stage'] = inpaint_traj_points.to(model_device)
+ # traj_model_kwargs['y']['inpainting_mask_second_stage'] = inpaint_traj_mask_points.to(model_device)
+ #
+ #
+ # ##########################################################
+ # # print("************* Test: not using dense gradient ****************")
+ # # NO_GRAD = True
+ # # traj_model_kwargs['y']['cond_until'] = 1000
+ #
+ # # traj_model_kwargs['y']['impute_until'] = 1000
+ # # traj_model_kwargs['y']['impute_until_second_stage'] = 0
+ #
+ # ##########################################################
+ #
+ # ### Generate trajectory
+ # # [bs, njoints, nfeat, seqlen]
+ # # NOTE: add cond_fn
+ # sample_traj = sample_fn_traj(
+ # traj_model,
+ # inpaint_traj.shape,
+ # clip_denoised=clip_denoised,
+ # model_kwargs=traj_model_kwargs, # <-- traj_kwards
+ # skip_timesteps=0, # NOTE: for debugging, start from 900
+ # init_image=None,
+ # progress=True,
+ # dump_steps=None,
+ # noise=None,
+ # const_noise=False,
+ # cond_fn=partial(
+ # cond_fn_key_location, # cond_fn_sdf, #,
+ # transform=self.dataset.t2m_dataset.transform_th,
+ # inv_transform=self.dataset.t2m_dataset.inv_transform_th,
+ # target=target,
+ # target_mask=target_mask,
+ # kframes=[],
+ # abs_3d=abs_3d, # <<-- hard code,
+ # classifiler_scale=trajectory_classifier_scale,
+ # use_mse_loss=False), # <<-- hard code
+ # )
+ #
+ # ### Prepare conditions for motion from generated trajectory ###
+ # # Get inpainting information for motion model
+ # traj_motion, traj_mask = get_inpainting_motion_from_traj(
+ # sample_traj, inv_transform_fn=self.dataset.t2m_dataset.inv_transform_th)
+ # # Get target for loss grad
+ # # Target has dimention [bs, max_motion_length, 22, 3]
+ # target = torch.zeros([motion.shape[0], max_motion_length, 22, 3], device=traj_motion.device)
+ # target_mask = torch.zeros_like(target, dtype=torch.bool)
+ # # This assume that the traj_motion is in the 3D space without normalization
+ # # traj_motion: [3, 263, 1, 196]
+ # target[:, :, 0, [0, 2]] = traj_motion.permute(0, 3, 2, 1)[:, :, 0,[1, 2]]
+ # target_mask[:, :, 0, [0, 2]] = True
+ # # Set imputing trajectory
+ # model_kwargs['y']['inpainted_motion'] = traj_motion
+ # model_kwargs['y']['inpainting_mask'] = traj_mask
+ # ### End - Prepare conditions ###
+
+ ### Generate motion
+ # NOTE: add cond_fn
+ # TODO: move the following to a separate function
+ # if guidance_mode == "kps" or guidance_mode == "trajectory":
+ # cond_fn = CondKeyLocations(target=target,
+ # target_mask=target_mask,
+ # transform=self.dataset.t2m_dataset.transform_th,
+ # inv_transform=self.dataset.t2m_dataset.inv_transform_th,
+ # abs_3d=abs_3d,
+ # classifiler_scale=motion_classifier_scale,
+ # use_mse_loss=False,
+ # use_rand_projection=self.dataset.use_random_proj
+ # )
+ # # elif guidance_mode == "sdf":
+ # # cond_fn = CondKeyLocationsWithSdf(target=target,
+ # # target_mask=target_mask,
+ # # transform=data.dataset.t2m_dataset.transform_th,
+ # # inv_transform=data.dataset.t2m_dataset.inv_transform_th,
+ # # abs_3d=abs_3d,
+ # # classifiler_scale=motion_classifier_scale,
+ # # use_mse_loss=False,
+ # # use_rand_projection=self.dataset.use_random_proj,
+ # # obs_list=obs_list
+ # # )
+ # elif guidance_mode == "no" or guidance_mode == "mdm_legacy":
+ # cond_fn = None
+
+ # if NO_GRAD:
+ # cond_fn = None
+ sample_fn = motion_diffusion.p_sample_loop
+
+ sample_motion = sample_fn(
+ motion_model,
+ (motion.shape[0], motion_model.njoints, motion_model.nfeats, motion.shape[3]),
+ clip_denoised=False,
+ model_kwargs=model_kwargs,
+ skip_timesteps=0, # 0 is the default value - i.e. don't skip any step
+ init_image=None,
+ progress=False, # True,
+ dump_steps=None,
+ noise=None,
+ const_noise=False,
+ )
+
+ # sample_motion = sample_fn_motion(
+ # motion_model,
+ # (motion.shape[0], motion_model.njoints, motion_model.nfeats, motion.shape[3]), # motion.shape
+ # clip_denoised=clip_denoised,
+ # model_kwargs=model_kwargs,
+ # skip_timesteps=0,
+ # init_image=None,
+ # progress=True,
+ # dump_steps=None,
+ # noise=None,
+ # const_noise=False,
+ # cond_fn=cond_fn
+ # # partial(
+ # # cond_fn_key_location,
+ # # transform=self.dataset.t2m_dataset.transform_th,
+ # # inv_transform=self.dataset.t2m_dataset.inv_transform_th,
+ # # target=target,
+ # # target_mask=target_mask,
+ # # kframes=[],
+ # # abs_3d=True, # <<-- hard code,
+ # # classifiler_scale=motion_classifier_scale,
+ # # use_mse_loss=False), # <<-- hard code
+ # )
+ # save to file
+ torch.save(sample_motion, batch_path)
+
+
+ # print('cut the motion length from {} to {}'.format(sample_motion.shape[-1], self.max_motion_length))
+ sample = sample_motion[:, :, :, :self.max_motion_length]
+
+ # Compute error for key xz locations
+ cur_motion = sample_to_motion(sample, self.dataset, motion_model)
+ #kps_error = compute_kps_error(cur_motion, gt_skel_motions, keyframes) # [batch_size, 5] in meter
+ kps_error = compute_kps_error_arbitrary(cur_motion, gt_skel_motions, keyframes_indices, traj_only=True)
+ keyframe_error = compute_kps_error_arbitrary(cur_motion, gt_skel_motions, keyframes_indices, traj_only=False)
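+ # kps_error: error on the root trajectory only at the keyframes; keyframe_error: error over all observed joints at the keyframes (per sample, presumably in meters as in compute_kps_error)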
+ skate_ratio, skate_vel = calculate_skating_ratio(cur_motion) # [batch_size]
+ # We can get the trajectory from here. Get only root xz from motion
+ cur_traj = cur_motion[:, 0, [0, 2], :]
+
+ # NOTE: To test if the motion is reasonable or not
+ if log_motion:
+ from utils.plot_script import plot_3d_motion
+ for j in tqdm([1, 3, 4, 5], desc="generating motion"):
+ motion_id = f'{i:04d}_{t:02d}_{j:02d}'
+ plot_3d_motion(os.path.join(self.save_dir, f"motion_cond_{motion_id}.mp4"), self.dataset.kinematic_chain,
+ cur_motion[j].permute(2,0,1).detach().cpu().numpy(), 'title', 'humanml', fps=20)
+
+ if self.dataset.absolute_3d:
+ # NOTE: Changing the output from absolute space to the relative space here.
+ # The easiest way to do this is to go all the way to skeleton and convert back again.
+ # sample shape [32, 263, 1, 196]
+ sample = abs3d_to_rel(sample, self.dataset, motion_model)
+
+ if t == 0:
+ sub_dicts = [{'motion': sample[bs_i].squeeze().permute(1,0).cpu().numpy(),
+ 'length': model_kwargs['y']['lengths'][bs_i].cpu().numpy(),
+ 'caption': model_kwargs['y']['text'][bs_i],
+ 'tokens': tokens[bs_i],
+ 'cap_len': len(tokens[bs_i]),
+ 'dist_error': kps_error[bs_i].cpu().numpy(),
+ 'skate_ratio': skate_ratio[bs_i],
+ 'keyframe_error': keyframe_error[bs_i].cpu().numpy(),
+ 'num_keyframes': len(keyframes_indices[bs_i]) if keyframes_indices[bs_i] is not None else 0,
+ } for bs_i in range(dataloader.batch_size)]
+ generated_motion += sub_dicts
+
+ if is_mm:
+ mm_motions += [{'motion': sample[bs_i].squeeze().permute(1, 0).cpu().numpy(),
+ 'length': model_kwargs['y']['lengths'][bs_i].cpu().numpy(),
+ 'traj': cur_traj[bs_i].squeeze().permute(1, 0).cpu().numpy(),
+ } for bs_i in range(dataloader.batch_size)]
+
+ if is_mm:
+ mm_generated_motions += [{
+ 'caption': model_kwargs['y']['text'][bs_i],
+ 'tokens': tokens[bs_i],
+ 'cap_len': len(tokens[bs_i]),
+ 'mm_motions': mm_motions[bs_i::dataloader.batch_size], # collect all 10 repeats from the (32*10) generated motions
+ } for bs_i in range(dataloader.batch_size)]
+
+
+ self.generated_motion = generated_motion
+ self.mm_generated_motion = mm_generated_motions
+ self.w_vectorizer = dataloader.dataset.w_vectorizer
+
+
+ def __len__(self):
+ return len(self.generated_motion)
+
+
+ def __getitem__(self, item):
+ data = self.generated_motion[item]
+ motion, m_length, caption, tokens = data['motion'], data['length'], data['caption'], data['tokens']
+ dist_error = data['dist_error']
+ skate_ratio = data['skate_ratio']
+ sent_len = data['cap_len']
+ keyframe_error = data['keyframe_error']
+ num_keyframes = data['num_keyframes']
+
+ if self.dataset.mode == 'eval':
+ normed_motion = motion
+ if self.dataset.absolute_3d:
+ # Denormalize with the relative mean/std because inv_transform() uses the absolute mean and std
+ # The motion is already converted to relative after inference
+ denormed_motion = (normed_motion * self.dataset.std_rel) + self.dataset.mean_rel
+ else:
+ denormed_motion = self.dataset.t2m_dataset.inv_transform(normed_motion)
+ renormed_motion = (denormed_motion - self.dataset.mean_for_eval) / self.dataset.std_for_eval # according to T2M norms
+ motion = renormed_motion
+ # This step is needed because T2M evaluators expect their norm convention
+
+ pos_one_hots = []
+ word_embeddings = []
+ for token in tokens:
+ word_emb, pos_oh = self.w_vectorizer[token]
+ pos_one_hots.append(pos_oh[None, :])
+ word_embeddings.append(word_emb[None, :])
+ pos_one_hots = np.concatenate(pos_one_hots, axis=0)
+ word_embeddings = np.concatenate(word_embeddings, axis=0)
+
+ return word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, '_'.join(tokens), dist_error, skate_ratio, keyframe_error, num_keyframes
+
+
+ def get_keyframe_indices(self, keyframes_mask):
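+ """Return, for each sample, the frame indices where the keyframe mask (assumed shape [bs, njoints, nfeats, nframes]) has any nonzero entry."""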
+ keyframe_indices = []
+ for sample_i in range(keyframes_mask.shape[0]):
+ keyframe_indices.append([int(e) for e in keyframes_mask[sample_i].sum(dim=0).squeeze().nonzero().squeeze(-1)])
+ return keyframe_indices
+
+
+ def set_inference_editing_args(self, model_kwargs, input_motions):
+ """ Set arguments for inference-time editing according to edit.py
+
+ Args:
+ model_kwargs (dict): arguments for the model
+ input_motions (torch.tensor): ground-truth motion with absolute-root representation
+
+ Returns:
+ list: per-sample keyframe indices
+ torch.Tensor: joint_mask
+ """
+ model_kwargs['y']['inpainted_motion'] = input_motions
+ model_kwargs['y']['imputate'] = self.args.imputate
+ model_kwargs['y']['replacement_distribution'] = self.args.replacement_distribution
+ model_kwargs['y']['reconstruction_guidance'] = self.args.reconstruction_guidance
+ model_kwargs['y']['reconstruction_weight'] = self.args.reconstruction_weight
+ model_kwargs['y']['diffusion_steps'] = self.args.diffusion_steps
+ model_kwargs['y']['gradient_schedule'] = self.args.gradient_schedule
+ model_kwargs['y']['stop_imputation_at'] = self.args.stop_imputation_at
+ model_kwargs['y']['stop_recguidance_at'] = self.args.stop_recguidance_at
+
+ # if args.text_condition == '':
+ # args.guidance_param = 0. # Force unconditioned generation
+
+ model_kwargs['y']['inpainting_mask'], joint_mask = get_keyframes_mask(data=model_kwargs['y']['inpainted_motion'],
+ lengths=model_kwargs['y']['lengths'],
+ edit_mode=self.args.edit_mode,
+ trans_length=self.args.transition_length,
+ feature_mode=self.args.editable_features,
+ get_joint_mask=True, n_keyframes=self.args.n_keyframes)
+
+ return self.get_keyframe_indices(model_kwargs['y']['inpainting_mask']), joint_mask
+
+
+ def set_conditional_synthesis_args(self, model_kwargs, input_motions):
+ """ Set arguments for conditional sampling according to conditional_synthesis.py
+
+ Args:
+ model_kwargs (dict): arguments for the model
+ input_motions (torch.tensor): ground-truth motion with absolute-root representation
+
+ Returns:
+ list: per-sample keyframe indices
+ torch.Tensor: joint_mask
+ """
+ model_kwargs['obs_x0'] = input_motions
+ model_kwargs['obs_mask'], joint_mask = get_keyframes_mask(data=input_motions, lengths=model_kwargs['y']['lengths'], edit_mode=self.args.edit_mode,
+ feature_mode=self.args.editable_features, trans_length=self.args.transition_length,
+ get_joint_mask=True, n_keyframes=self.args.n_keyframes)
+ model_kwargs['y']['diffusion_steps'] = self.args.diffusion_steps
+ # Add inpainting mask according to args
+ if self.args.zero_keyframe_loss: # if the loss is zero over keyframes during training, then keyframes must be imputed during inference
+ model_kwargs['y']['imputate'] = 1
+ model_kwargs['y']['stop_imputation_at'] = 0
+ model_kwargs['y']['replacement_distribution'] = 'conditional'
+ model_kwargs['y']['inpainted_motion'] = model_kwargs['obs_x0']
+ model_kwargs['y']['inpainting_mask'] = model_kwargs['obs_mask'] # used to do [nsamples, nframes] --> [nsamples, njoints, nfeats, nframes]
+ model_kwargs['y']['reconstruction_guidance'] = False
+ elif self.args.imputate: # if loss was present over keyframes during training, we may use inpainting at inference time
+ model_kwargs['y']['imputate'] = 1
+ model_kwargs['y']['stop_imputation_at'] = self.args.stop_imputation_at
+ model_kwargs['y']['replacement_distribution'] = 'conditional' # TODO: check if should also support marginal distribution
+ model_kwargs['y']['inpainted_motion'] = model_kwargs['obs_x0']
+ model_kwargs['y']['inpainting_mask'] = model_kwargs['obs_mask']
+ if self.args.reconstruction_guidance: # if loss was present over keyframes during training, we may use guidance at inference time
+ model_kwargs['y']['reconstruction_guidance'] = self.args.reconstruction_guidance
+ model_kwargs['y']['reconstruction_weight'] = self.args.reconstruction_weight
+ model_kwargs['y']['gradient_schedule'] = self.args.gradient_schedule
+ model_kwargs['y']['stop_recguidance_at'] = self.args.stop_recguidance_at
+ elif self.args.reconstruction_guidance: # if loss was present over keyframes during training, we may use guidance at inference time
+ model_kwargs['y']['inpainted_motion'] = model_kwargs['obs_x0']
+ model_kwargs['y']['inpainting_mask'] = model_kwargs['obs_mask']
+ model_kwargs['y']['reconstruction_guidance'] = self.args.reconstruction_guidance
+ model_kwargs['y']['reconstruction_weight'] = self.args.reconstruction_weight
+ model_kwargs['y']['gradient_schedule'] = self.args.gradient_schedule
+ model_kwargs['y']['stop_recguidance_at'] = self.args.stop_recguidance_at
+
+ return self.get_keyframe_indices(model_kwargs['obs_mask']), joint_mask
diff --git a/data_loaders/custom/motion_loaders/dataset_motion_loader.py b/data_loaders/custom/motion_loaders/dataset_motion_loader.py
new file mode 100644
index 0000000..37fff1d
--- /dev/null
+++ b/data_loaders/custom/motion_loaders/dataset_motion_loader.py
@@ -0,0 +1,27 @@
+from t2m.data.dataset import Text2MotionDatasetV2, collate_fn
+from t2m.utils.word_vectorizer import WordVectorizer
+import numpy as np
+from os.path import join as pjoin
+from torch.utils.data import DataLoader
+from t2m.utils.get_opt import get_opt
+
+def get_dataset_motion_loader(opt_path, batch_size, device):
+ opt = get_opt(opt_path, device)
+
+ # Configurations of the T2M dataset and the KIT dataset are almost the same
+ if opt.dataset_name == 't2m' or opt.dataset_name == 'kit':
+ print('Loading dataset %s ...' % opt.dataset_name)
+
+ mean = np.load(pjoin(opt.meta_dir, 'mean.npy'))
+ std = np.load(pjoin(opt.meta_dir, 'std.npy'))
+
+ w_vectorizer = WordVectorizer('./glove', 'our_vab')
+ split_file = pjoin(opt.data_root, 'test.txt')
+ dataset = Text2MotionDatasetV2(opt, mean, std, split_file, w_vectorizer)
+ dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4, drop_last=True,
+ collate_fn=collate_fn, shuffle=True)
+ else:
+ raise KeyError('Dataset not Recognized !!')
+
+ print('Ground Truth Dataset Loading Completed!!!')
+ return dataloader, dataset
\ No newline at end of file
diff --git a/data_loaders/custom/motion_loaders/model_motion_loaders.py b/data_loaders/custom/motion_loaders/model_motion_loaders.py
new file mode 100644
index 0000000..2149d4e
--- /dev/null
+++ b/data_loaders/custom/motion_loaders/model_motion_loaders.py
@@ -0,0 +1,208 @@
+from torch.utils.data import DataLoader, Dataset
+from utils.get_opt import get_opt
+from motion_loaders.comp_v6_model_dataset import (CompMDMGeneratedDataset,
+ CompMDMGeneratedDatasetCondition,
+ CompMDMGeneratedDatasetInpainting)
+from motion_loaders.comp_v6_model_dataset_condmdi import CompMDMGeneratedDatasetCondMDI
+from utils.word_vectorizer import WordVectorizer
+import numpy as np
+from torch.utils.data._utils.collate import default_collate
+from utils.fixseed import fixseed
+import os
+
+
+def collate_fn(batch):
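+ # sort by caption length (element 3 of the __getitem__ tuple), longest first, presumably because the evaluator's text encoder expects length-sorted batches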
+ batch.sort(key=lambda x: x[3], reverse=True)
+ return default_collate(batch)
+
+
+class MMGeneratedDataset(Dataset):
+ def __init__(self, opt, motion_dataset, w_vectorizer):
+ self.opt = opt
+ self.dataset = motion_dataset.mm_generated_motion
+ self.w_vectorizer = w_vectorizer
+
+ def __len__(self):
+ return len(self.dataset)
+
+ def __getitem__(self, item):
+ data = self.dataset[item]
+ mm_motions = data['mm_motions']
+ m_lens = []
+ motions = []
+ trajs = []
+ for mm_motion in mm_motions:
+ m_lens.append(mm_motion['length'])
+ motion = mm_motion['motion']
+ traj = mm_motion['traj']
+ # We don't need the following logic because our sample func generates the full tensor anyway:
+ # if len(motion) < self.opt.max_motion_length:
+ # motion = np.concatenate([motion,
+ # np.zeros((self.opt.max_motion_length - len(motion), motion.shape[1]))
+ # ], axis=0)
+ motion = motion[None, :]
+ traj = traj[None, :]
+ # print("motion shape", motion.shape)
+ # print("traj shape", traj.shape)
+ motions.append(motion)
+ trajs.append(traj)
+ # import pdb; pdb.set_trace()
+ m_lens = np.array(m_lens, dtype=int)  # np.int was removed in recent NumPy versions
+ motions = np.concatenate(motions, axis=0)
+ trajs = np.concatenate(trajs, axis=0)
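+ # sort the repeats by motion length, longest first, before returning (presumably to match the ordering the multimodality evaluator expects)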
+ sort_indx = np.argsort(m_lens)[::-1].copy()
+ # print(m_lens)
+ # print(sort_indx)
+ # print(m_lens[sort_indx])
+ m_lens = m_lens[sort_indx]
+ motions = motions[sort_indx]
+ trajs = trajs[sort_indx]
+ return motions, m_lens, trajs
+
+
+def get_motion_loader(opt_path, batch_size, ground_truth_dataset,
+ mm_num_samples, mm_num_repeats, device):
+ opt = get_opt(opt_path, device, use_abs3d=...)
+ raise NotImplementedError('This function is not used anymore. Use get_mdm_loader instead.')
+
+ # Currently the configurations of two datasets are almost the same
+ if opt.dataset_name == 't2m' or opt.dataset_name == 'kit':
+ w_vectorizer = WordVectorizer('./glove', 'our_vab')
+ else:
+ raise KeyError('Dataset not recognized!!')
+ print('Generating %s ...' % opt.name)
+
+ if 'v6' in opt.name:
+ dataset = CompV6GeneratedDataset(opt, ground_truth_dataset,
+ w_vectorizer, mm_num_samples,
+ mm_num_repeats)
+ else:
+ raise KeyError('Dataset not recognized!!')
+
+ mm_dataset = MMGeneratedDataset(opt, dataset, w_vectorizer)
+
+ motion_loader = DataLoader(dataset,
+ batch_size=batch_size,
+ collate_fn=collate_fn,
+ drop_last=True,
+ num_workers=4)
+ mm_motion_loader = DataLoader(mm_dataset, batch_size=1, num_workers=1)
+
+ print('Generated Dataset Loading Completed!!!')
+
+ return motion_loader, mm_motion_loader
+
+
+# Our loader
+def get_mdm_loader(model, diffusion, batch_size, ground_truth_loader,
+ mm_num_samples, mm_num_repeats, max_motion_length,
+ num_samples_limit, scale, seed, save_dir, full_inpaint):
+ # set seed individually for each call
+ fixseed(seed)
+ opt = {
+ 'name': 'test', # FIXME
+ }
+ print('Generating %s ...' % opt['name'])
+ # dataset = CompMDMGeneratedDataset(opt, ground_truth_dataset, ground_truth_dataset.w_vectorizer, mm_num_samples, mm_num_repeats)
+ save_dir = os.path.join(save_dir, f'seed{seed:02d}')
+ print('save_dir:', save_dir)
+ if full_inpaint:
+ dataset = CompMDMGeneratedDatasetInpainting(model, diffusion, ground_truth_loader,
+ mm_num_samples, mm_num_repeats,
+ max_motion_length, num_samples_limit,
+ scale, save_dir=save_dir, seed=seed)
+ else:
+ dataset = CompMDMGeneratedDataset(model, diffusion, ground_truth_loader,
+ mm_num_samples, mm_num_repeats,
+ max_motion_length, num_samples_limit,
+ scale, save_dir=save_dir, seed=seed)
+
+ mm_dataset = MMGeneratedDataset(opt, dataset,
+ ground_truth_loader.dataset.w_vectorizer)
+
+ # NOTE: batch_size must not be changed! Changing it breaks the R-precision calculation!
+ motion_loader = DataLoader(dataset,
+ batch_size=batch_size,
+ collate_fn=collate_fn,
+ drop_last=True,
+ num_workers=4)
+ mm_motion_loader = DataLoader(mm_dataset, batch_size=1, num_workers=1)
+
+ print('Generated Dataset Loading Completed!!!')
+
+ return motion_loader, mm_motion_loader
+
+
+# Our loader for conditioning
+def get_mdm_loader_cond(model_dict, diffusion_dict, batch_size, ground_truth_loader,
+ mm_num_samples, mm_num_repeats, max_motion_length,
+ num_samples_limit, scale, seed, save_dir, impute_until, skip_first_stage,
+ use_ddim):
+ # set seed individually for each call
+ fixseed(seed)
+ opt = {
+ 'name': 'test', # FIXME
+ }
+ print('Generating %s ...' % opt['name'])
+ # dataset = CompMDMGeneratedDataset(opt, ground_truth_dataset, ground_truth_dataset.w_vectorizer, mm_num_samples, mm_num_repeats)
+ suffix = "_skip1st" if skip_first_stage else ""
+ save_dir = os.path.join(save_dir, f't{impute_until:03d}{suffix}_seed{seed:02d}')
+ print('save_dir:', save_dir)
+
+ dataset = CompMDMGeneratedDatasetCondition(model_dict, diffusion_dict, ground_truth_loader,
+ mm_num_samples, mm_num_repeats,
+ max_motion_length, num_samples_limit,
+ scale, save_dir=save_dir, impute_until=impute_until, skip_first_stage=skip_first_stage,
+ seed=seed, use_ddim=use_ddim)
+
+ mm_dataset = MMGeneratedDataset(opt, dataset,
+ ground_truth_loader.dataset.w_vectorizer)
+
+ # NOTE: batch_size must not be changed! Changing it breaks the R-precision calculation!
+ motion_loader = DataLoader(dataset,
+ batch_size=batch_size,
+ collate_fn=collate_fn,
+ drop_last=True,
+ num_workers=4)
+ mm_motion_loader = DataLoader(mm_dataset, batch_size=1, num_workers=1)
+
+ print('Generated Dataset Loading Completed!!!')
+
+ return motion_loader, mm_motion_loader
+
+
+def get_mdm_loader_ours(model_dict, diffusion_dict, batch_size, ground_truth_loader,
+ mm_num_samples, mm_num_repeats, max_motion_length,
+ num_samples_limit, text_scale, keyframe_scale, seed, save_dir, impute_until, skip_first_stage,
+ use_ddim, args):
+ # set seed individually for each call
+ fixseed(seed)
+ opt = {
+ 'name': 'test', # FIXME
+ }
+ print('Generating %s ...' % opt['name'])
+ # dataset = CompMDMGeneratedDataset(opt, ground_truth_dataset, ground_truth_dataset.w_vectorizer, mm_num_samples, mm_num_repeats)
+ suffix = "_skip1st" if skip_first_stage else ""
+ save_dir = os.path.join(save_dir, f't{impute_until:03d}{suffix}_seed{seed:02d}')
+ print('save_dir:', save_dir)
+
+ dataset = CompMDMGeneratedDatasetCondMDI(model_dict, diffusion_dict, ground_truth_loader,
+ mm_num_samples, mm_num_repeats,
+ max_motion_length, num_samples_limit,
+ text_scale, keyframe_scale, save_dir=save_dir, impute_until=impute_until, skip_first_stage=skip_first_stage,
+ seed=seed, use_ddim=use_ddim, args=args)
+
+ mm_dataset = MMGeneratedDataset(opt, dataset,
+ ground_truth_loader.dataset.w_vectorizer)
+
+ # NOTE: batch_size must not be changed! Changing it breaks the R-precision calculation!
+ motion_loader = DataLoader(dataset,
+ batch_size=batch_size,
+ collate_fn=collate_fn,
+ drop_last=True,
+ num_workers=4)
+ mm_motion_loader = DataLoader(mm_dataset, batch_size=1, num_workers=1)
+
+ print('Generated Dataset Loading Completed!!!')
+
+ return motion_loader, mm_motion_loader
diff --git a/data_loaders/custom/networks/__init__.py b/data_loaders/custom/networks/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/data_loaders/custom/networks/evaluator_wrapper.py b/data_loaders/custom/networks/evaluator_wrapper.py
new file mode 100644
index 0000000..6d049fa
--- /dev/null
+++ b/data_loaders/custom/networks/evaluator_wrapper.py
@@ -0,0 +1,187 @@
+from networks.modules import *
+from utils.word_vectorizer import POS_enumerator
+from os.path import join as pjoin
+
+def build_models(opt):
+ movement_enc = MovementConvEncoder(opt.dim_pose-4, opt.dim_movement_enc_hidden, opt.dim_movement_latent)
+ text_enc = TextEncoderBiGRUCo(word_size=opt.dim_word,
+ pos_size=opt.dim_pos_ohot,
+ hidden_size=opt.dim_text_hidden,
+ output_size=opt.dim_coemb_hidden,
+ device=opt.device)
+
+ motion_enc = MotionEncoderBiGRUCo(input_size=opt.dim_movement_latent,
+ hidden_size=opt.dim_motion_hidden,
+ output_size=opt.dim_coemb_hidden,
+ device=opt.device)
+
+ checkpoint = torch.load(pjoin(opt.checkpoints_dir, opt.dataset_name, 'text_mot_match', 'model', 'finest.tar'),
+ map_location=opt.device)
+ movement_enc.load_state_dict(checkpoint['movement_encoder'])
+ text_enc.load_state_dict(checkpoint['text_encoder'])
+ motion_enc.load_state_dict(checkpoint['motion_encoder'])
+ print('Loading Evaluation Model Wrapper (Epoch %d) Completed!!' % (checkpoint['epoch']))
+ return text_enc, motion_enc, movement_enc
+
+
+class EvaluatorModelWrapper(object):
+
+ def __init__(self, opt):
+
+ if opt.dataset_name == 't2m':
+ opt.dim_pose = 263
+ elif opt.dataset_name == 'kit':
+ opt.dim_pose = 251
+ else:
+ raise KeyError('Dataset not Recognized!!!')
+
+ opt.dim_word = 300
+ opt.max_motion_length = 196
+ opt.dim_pos_ohot = len(POS_enumerator)
+ opt.dim_motion_hidden = 1024
+ opt.max_text_len = 20
+ opt.dim_text_hidden = 512
+ opt.dim_coemb_hidden = 512
+
+ self.text_encoder, self.motion_encoder, self.movement_encoder = build_models(opt)
+ self.opt = opt
+ self.device = opt.device
+
+ self.text_encoder.to(opt.device)
+ self.motion_encoder.to(opt.device)
+ self.movement_encoder.to(opt.device)
+
+ self.text_encoder.eval()
+ self.motion_encoder.eval()
+ self.movement_encoder.eval()
+
+ # Please note that the results do not follow the order of the inputs
+ def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens):
+ with torch.no_grad():
+ word_embs = word_embs.detach().to(self.device).float()
+ pos_ohot = pos_ohot.detach().to(self.device).float()
+ motions = motions.detach().to(self.device).float()
+
+ align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()
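+ # sort motions by length, longest first, for the recurrent motion encoder; the text embeddings are re-aligned to the same order below, which is why the outputs do not follow the input order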
+ motions = motions[align_idx]
+ m_lens = m_lens[align_idx]
+
+ '''Movement Encoding'''
+ movements = self.movement_encoder(motions[..., :-4]).detach()
+ m_lens = m_lens // self.opt.unit_length
+ motion_embedding = self.motion_encoder(movements, m_lens)
+
+ '''Text Encoding'''
+ text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens)
+ text_embedding = text_embedding[align_idx]
+ return text_embedding, motion_embedding
+
+ # Please note that the results do not follow the order of the inputs
+ def get_motion_embeddings(self, motions, m_lens):
+ with torch.no_grad():
+ motions = motions.detach().to(self.device).float()
+
+ align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()
+ motions = motions[align_idx]
+ m_lens = m_lens[align_idx]
+
+ '''Movement Encoding'''
+ movements = self.movement_encoder(motions[..., :-4]).detach()
+ m_lens = m_lens // self.opt.unit_length
+ motion_embedding = self.motion_encoder(movements, m_lens)
+ return motion_embedding
+
+# our dict-based variant of build_models; it also maps the 'humanml' dataset name to the 't2m' checkpoint directory
+def build_evaluators(opt):
+ movement_enc = MovementConvEncoder(opt['dim_pose']-4, opt['dim_movement_enc_hidden'], opt['dim_movement_latent'])
+ text_enc = TextEncoderBiGRUCo(word_size=opt['dim_word'],
+ pos_size=opt['dim_pos_ohot'],
+ hidden_size=opt['dim_text_hidden'],
+ output_size=opt['dim_coemb_hidden'],
+ device=opt['device'])
+
+ motion_enc = MotionEncoderBiGRUCo(input_size=opt['dim_movement_latent'],
+ hidden_size=opt['dim_motion_hidden'],
+ output_size=opt['dim_coemb_hidden'],
+ device=opt['device'])
+
+ ckpt_dir = opt['dataset_name']
+ if opt['dataset_name'] == 'humanml':
+ ckpt_dir = 't2m'
+
+ checkpoint = torch.load(pjoin(opt['checkpoints_dir'], ckpt_dir, 'text_mot_match', 'model', 'finest.tar'),
+ map_location=opt['device'])
+ movement_enc.load_state_dict(checkpoint['movement_encoder'])
+ text_enc.load_state_dict(checkpoint['text_encoder'])
+ motion_enc.load_state_dict(checkpoint['motion_encoder'])
+ print('Loading Evaluation Model Wrapper (Epoch %d) Completed!!' % (checkpoint['epoch']))
+ return text_enc, motion_enc, movement_enc
+
+# our wrapper: same interface as EvaluatorModelWrapper above, but configured from a plain dict instead of an opt object
+class EvaluatorMDMWrapper(object):
+
+ def __init__(self, dataset_name, device):
+ opt = {
+ 'dataset_name': dataset_name,
+ 'device': device,
+ 'dim_word': 300,
+ 'max_motion_length': 196,
+ 'dim_pos_ohot': len(POS_enumerator),
+ 'dim_motion_hidden': 1024,
+ 'max_text_len': 20,
+ 'dim_text_hidden': 512,
+ 'dim_coemb_hidden': 512,
+ 'dim_pose': 263 if dataset_name == 'humanml' else 251,
+ 'dim_movement_enc_hidden': 512,
+ 'dim_movement_latent': 512,
+ 'checkpoints_dir': '.',
+ 'unit_length': 4,
+ }
+
+ self.text_encoder, self.motion_encoder, self.movement_encoder = build_evaluators(opt)
+ self.opt = opt
+ self.device = opt['device']
+
+ self.text_encoder.to(opt['device'])
+ self.motion_encoder.to(opt['device'])
+ self.movement_encoder.to(opt['device'])
+
+ self.text_encoder.eval()
+ self.motion_encoder.eval()
+ self.movement_encoder.eval()
+
+    # Please note that the results do not follow the order of the inputs
+ def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens):
+ with torch.no_grad():
+ word_embs = word_embs.detach().to(self.device).float()
+ pos_ohot = pos_ohot.detach().to(self.device).float()
+ motions = motions.detach().to(self.device).float()
+
+ align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()
+ motions = motions[align_idx]
+ m_lens = m_lens[align_idx]
+
+ '''Movement Encoding'''
+ movements = self.movement_encoder(motions[..., :-4]).detach()
+ m_lens = m_lens // self.opt['unit_length']
+ motion_embedding = self.motion_encoder(movements, m_lens)
+
+ '''Text Encoding'''
+ text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens)
+ text_embedding = text_embedding[align_idx]
+ return text_embedding, motion_embedding
+
+    # Please note that the results do not follow the order of the inputs
+ def get_motion_embeddings(self, motions, m_lens):
+ with torch.no_grad():
+ motions = motions.detach().to(self.device).float()
+
+ align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()
+ motions = motions[align_idx]
+ m_lens = m_lens[align_idx]
+
+ '''Movement Encoding'''
+ movements = self.movement_encoder(motions[..., :-4]).detach()
+ m_lens = m_lens // self.opt['unit_length']
+ motion_embedding = self.motion_encoder(movements, m_lens)
+ return motion_embedding
diff --git a/data_loaders/custom/networks/modules.py b/data_loaders/custom/networks/modules.py
new file mode 100644
index 0000000..3177738
--- /dev/null
+++ b/data_loaders/custom/networks/modules.py
@@ -0,0 +1,438 @@
+import torch
+import torch.nn as nn
+import numpy as np
+import time
+import math
+from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
+# from networks.layers import *
+import torch.nn.functional as F
+
+
+class ContrastiveLoss(torch.nn.Module):
+ """
+ Contrastive loss function.
+ Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
+ """
+ def __init__(self, margin=3.0):
+ super(ContrastiveLoss, self).__init__()
+ self.margin = margin
+
+ def forward(self, output1, output2, label):
+ euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True)
+ loss_contrastive = torch.mean((1-label) * torch.pow(euclidean_distance, 2) +
+ (label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))
+ return loss_contrastive
+
+
+def init_weight(m):
+ if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear) or isinstance(m, nn.ConvTranspose1d):
+ nn.init.xavier_normal_(m.weight)
+ # m.bias.data.fill_(0.01)
+ if m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+
+
+def reparameterize(mu, logvar):
+ s_var = logvar.mul(0.5).exp_()
+ eps = s_var.data.new(s_var.size()).normal_()
+ return eps.mul(s_var).add_(mu)
+
+
+# Sinusoidal positional encoding for a batch of (per-sample) positions.
+# Inputs: batch_size, dim, pos of shape (batch_size,); output: (batch_size, dim)
+def positional_encoding(batch_size, dim, pos):
+ assert batch_size == pos.shape[0]
+ positions_enc = np.array([
+ [pos[j] / np.power(10000, (i-i%2)/dim) for i in range(dim)]
+ for j in range(batch_size)
+ ], dtype=np.float32)
+ positions_enc[:, 0::2] = np.sin(positions_enc[:, 0::2])
+ positions_enc[:, 1::2] = np.cos(positions_enc[:, 1::2])
+ return torch.from_numpy(positions_enc).float()
+
+
+def get_padding_mask(batch_size, seq_len, cap_lens):
+ cap_lens = cap_lens.data.tolist()
+ mask_2d = torch.ones((batch_size, seq_len, seq_len), dtype=torch.float32)
+ for i, cap_len in enumerate(cap_lens):
+ mask_2d[i, :, :cap_len] = 0
+ return mask_2d.bool(), 1 - mask_2d[:, :, 0].clone()
+
+
+class PositionalEncoding(nn.Module):
+
+ def __init__(self, d_model, max_len=300):
+ super(PositionalEncoding, self).__init__()
+
+ pe = torch.zeros(max_len, d_model)
+ position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
+ div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
+ pe[:, 0::2] = torch.sin(position * div_term)
+ pe[:, 1::2] = torch.cos(position * div_term)
+ # pe = pe.unsqueeze(0).transpose(0, 1)
+ self.register_buffer('pe', pe)
+
+ def forward(self, pos):
+ return self.pe[pos]
+
+
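+# Two strided Conv1d blocks (kernel 4, stride 2) downsample the pose sequence by a
+# factor of 4, i.e. one movement latent per unit_length (4) frames.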
+class MovementConvEncoder(nn.Module):
+ def __init__(self, input_size, hidden_size, output_size):
+ super(MovementConvEncoder, self).__init__()
+ self.main = nn.Sequential(
+ nn.Conv1d(input_size, hidden_size, 4, 2, 1),
+ nn.Dropout(0.2, inplace=True),
+ nn.LeakyReLU(0.2, inplace=True),
+ nn.Conv1d(hidden_size, output_size, 4, 2, 1),
+ nn.Dropout(0.2, inplace=True),
+ nn.LeakyReLU(0.2, inplace=True),
+ )
+ self.out_net = nn.Linear(output_size, output_size)
+ self.main.apply(init_weight)
+ self.out_net.apply(init_weight)
+
+ def forward(self, inputs):
+ inputs = inputs.permute(0, 2, 1)
+ outputs = self.main(inputs).permute(0, 2, 1)
+ # print(outputs.shape)
+ return self.out_net(outputs)
+
+
+class MovementConvDecoder(nn.Module):
+ def __init__(self, input_size, hidden_size, output_size):
+ super(MovementConvDecoder, self).__init__()
+ self.main = nn.Sequential(
+ nn.ConvTranspose1d(input_size, hidden_size, 4, 2, 1),
+ # nn.Dropout(0.2, inplace=True),
+ nn.LeakyReLU(0.2, inplace=True),
+ nn.ConvTranspose1d(hidden_size, output_size, 4, 2, 1),
+ # nn.Dropout(0.2, inplace=True),
+ nn.LeakyReLU(0.2, inplace=True),
+ )
+ self.out_net = nn.Linear(output_size, output_size)
+
+ self.main.apply(init_weight)
+ self.out_net.apply(init_weight)
+
+ def forward(self, inputs):
+ inputs = inputs.permute(0, 2, 1)
+ outputs = self.main(inputs).permute(0, 2, 1)
+ return self.out_net(outputs)
+
+
+class TextVAEDecoder(nn.Module):
+ def __init__(self, text_size, input_size, output_size, hidden_size, n_layers):
+ super(TextVAEDecoder, self).__init__()
+ self.input_size = input_size
+ self.output_size = output_size
+ self.hidden_size = hidden_size
+ self.n_layers = n_layers
+ self.emb = nn.Sequential(
+ nn.Linear(input_size, hidden_size),
+ nn.LayerNorm(hidden_size),
+ nn.LeakyReLU(0.2, inplace=True))
+
+ self.z2init = nn.Linear(text_size, hidden_size * n_layers)
+ self.gru = nn.ModuleList([nn.GRUCell(hidden_size, hidden_size) for i in range(self.n_layers)])
+ self.positional_encoder = PositionalEncoding(hidden_size)
+
+
+ self.output = nn.Sequential(
+ nn.Linear(hidden_size, hidden_size),
+ nn.LayerNorm(hidden_size),
+ nn.LeakyReLU(0.2, inplace=True),
+ nn.Linear(hidden_size, output_size)
+ )
+
+ #
+ # self.output = nn.Sequential(
+ # nn.Linear(hidden_size, hidden_size),
+ # nn.LayerNorm(hidden_size),
+ # nn.LeakyReLU(0.2, inplace=True),
+ # nn.Linear(hidden_size, output_size-4)
+ # )
+
+ # self.contact_net = nn.Sequential(
+ # nn.Linear(output_size-4, 64),
+ # nn.LayerNorm(64),
+ # nn.LeakyReLU(0.2, inplace=True),
+ # nn.Linear(64, 4)
+ # )
+
+ self.output.apply(init_weight)
+ self.emb.apply(init_weight)
+ self.z2init.apply(init_weight)
+ # self.contact_net.apply(init_weight)
+
+ def get_init_hidden(self, latent):
+ hidden = self.z2init(latent)
+ hidden = torch.split(hidden, self.hidden_size, dim=-1)
+ return list(hidden)
+
+ def forward(self, inputs, last_pred, hidden, p):
+ h_in = self.emb(inputs)
+ pos_enc = self.positional_encoder(p).to(inputs.device).detach()
+ h_in = h_in + pos_enc
+ for i in range(self.n_layers):
+ # print(h_in.shape)
+ hidden[i] = self.gru[i](h_in, hidden[i])
+ h_in = hidden[i]
+ pose_pred = self.output(h_in)
+ # pose_pred = self.output(h_in) + last_pred.detach()
+ # contact = self.contact_net(pose_pred)
+ # return torch.cat([pose_pred, contact], dim=-1), hidden
+ return pose_pred, hidden
+
+
+class TextDecoder(nn.Module):
+ def __init__(self, text_size, input_size, output_size, hidden_size, n_layers):
+ super(TextDecoder, self).__init__()
+ self.input_size = input_size
+ self.output_size = output_size
+ self.hidden_size = hidden_size
+ self.n_layers = n_layers
+ self.emb = nn.Sequential(
+ nn.Linear(input_size, hidden_size),
+ nn.LayerNorm(hidden_size),
+ nn.LeakyReLU(0.2, inplace=True))
+
+ self.gru = nn.ModuleList([nn.GRUCell(hidden_size, hidden_size) for i in range(self.n_layers)])
+ self.z2init = nn.Linear(text_size, hidden_size * n_layers)
+ self.positional_encoder = PositionalEncoding(hidden_size)
+
+ self.mu_net = nn.Linear(hidden_size, output_size)
+ self.logvar_net = nn.Linear(hidden_size, output_size)
+
+ self.emb.apply(init_weight)
+ self.z2init.apply(init_weight)
+ self.mu_net.apply(init_weight)
+ self.logvar_net.apply(init_weight)
+
+ def get_init_hidden(self, latent):
+
+ hidden = self.z2init(latent)
+ hidden = torch.split(hidden, self.hidden_size, dim=-1)
+
+ return list(hidden)
+
+ def forward(self, inputs, hidden, p):
+ # print(inputs.shape)
+ x_in = self.emb(inputs)
+ pos_enc = self.positional_encoder(p).to(inputs.device).detach()
+ x_in = x_in + pos_enc
+
+ for i in range(self.n_layers):
+ hidden[i] = self.gru[i](x_in, hidden[i])
+ h_in = hidden[i]
+ mu = self.mu_net(h_in)
+ logvar = self.logvar_net(h_in)
+ z = reparameterize(mu, logvar)
+ return z, mu, logvar, hidden
+
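+# Single-query scaled dot-product attention: the query attends over the per-word
+# hidden states and returns the attention-weighted value vector and the weights.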
+class AttLayer(nn.Module):
+ def __init__(self, query_dim, key_dim, value_dim):
+ super(AttLayer, self).__init__()
+ self.W_q = nn.Linear(query_dim, value_dim)
+ self.W_k = nn.Linear(key_dim, value_dim, bias=False)
+ self.W_v = nn.Linear(key_dim, value_dim)
+
+ self.softmax = nn.Softmax(dim=1)
+ self.dim = value_dim
+
+ self.W_q.apply(init_weight)
+ self.W_k.apply(init_weight)
+ self.W_v.apply(init_weight)
+
+ def forward(self, query, key_mat):
+ '''
+ query (batch, query_dim)
+ key (batch, seq_len, key_dim)
+ '''
+ # print(query.shape)
+ query_vec = self.W_q(query).unsqueeze(-1) # (batch, value_dim, 1)
+ val_set = self.W_v(key_mat) # (batch, seq_len, value_dim)
+ key_set = self.W_k(key_mat) # (batch, seq_len, value_dim)
+
+ weights = torch.matmul(key_set, query_vec) / np.sqrt(self.dim)
+
+ co_weights = self.softmax(weights) # (batch, seq_len, 1)
+ values = val_set * co_weights # (batch, seq_len, value_dim)
+ pred = values.sum(dim=1) # (batch, value_dim)
+ return pred, co_weights
+
+ def short_cut(self, querys, keys):
+ return self.W_q(querys), self.W_k(keys)
+
+
+class TextEncoderBiGRU(nn.Module):
+ def __init__(self, word_size, pos_size, hidden_size, device):
+ super(TextEncoderBiGRU, self).__init__()
+ self.device = device
+
+ self.pos_emb = nn.Linear(pos_size, word_size)
+ self.input_emb = nn.Linear(word_size, hidden_size)
+ self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
+ # self.linear2 = nn.Linear(hidden_size, output_size)
+
+ self.input_emb.apply(init_weight)
+ self.pos_emb.apply(init_weight)
+ # self.linear2.apply(init_weight)
+ # self.batch_size = batch_size
+ self.hidden_size = hidden_size
+ self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))
+
+ # input(batch_size, seq_len, dim)
+ def forward(self, word_embs, pos_onehot, cap_lens):
+ num_samples = word_embs.shape[0]
+
+ pos_embs = self.pos_emb(pos_onehot)
+ inputs = word_embs + pos_embs
+ input_embs = self.input_emb(inputs)
+ hidden = self.hidden.repeat(1, num_samples, 1)
+
+ cap_lens = cap_lens.data.tolist()
+ emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True)
+
+ gru_seq, gru_last = self.gru(emb, hidden)
+
+ gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1)
+ gru_seq = pad_packed_sequence(gru_seq, batch_first=True)[0]
+ forward_seq = gru_seq[..., :self.hidden_size]
+ backward_seq = gru_seq[..., self.hidden_size:].clone()
+
+        # Concatenate the forward and backward word embeddings
+ for i, length in enumerate(cap_lens):
+ backward_seq[i:i+1, :length] = torch.flip(backward_seq[i:i+1, :length].clone(), dims=[1])
+ gru_seq = torch.cat([forward_seq, backward_seq], dim=-1)
+
+ return gru_seq, gru_last
+
+
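+# BiGRU text encoder whose concatenated last hidden states are projected into the
+# shared text-motion embedding space (dim_coemb_hidden).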
+class TextEncoderBiGRUCo(nn.Module):
+ def __init__(self, word_size, pos_size, hidden_size, output_size, device):
+ super(TextEncoderBiGRUCo, self).__init__()
+ self.device = device
+
+ self.pos_emb = nn.Linear(pos_size, word_size)
+ self.input_emb = nn.Linear(word_size, hidden_size)
+ self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
+ self.output_net = nn.Sequential(
+ nn.Linear(hidden_size * 2, hidden_size),
+ nn.LayerNorm(hidden_size),
+ nn.LeakyReLU(0.2, inplace=True),
+ nn.Linear(hidden_size, output_size)
+ )
+
+ self.input_emb.apply(init_weight)
+ self.pos_emb.apply(init_weight)
+ self.output_net.apply(init_weight)
+ # self.linear2.apply(init_weight)
+ # self.batch_size = batch_size
+ self.hidden_size = hidden_size
+ self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))
+
+ # input(batch_size, seq_len, dim)
+ def forward(self, word_embs, pos_onehot, cap_lens):
+ num_samples = word_embs.shape[0]
+
+ pos_embs = self.pos_emb(pos_onehot)
+ inputs = word_embs + pos_embs
+ input_embs = self.input_emb(inputs)
+ hidden = self.hidden.repeat(1, num_samples, 1)
+
+ cap_lens = cap_lens.data.tolist()
+ emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True)
+
+ gru_seq, gru_last = self.gru(emb, hidden)
+
+ gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1)
+
+ return self.output_net(gru_last)
+
+
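+# BiGRU over the (downsampled) movement latents; the concatenated last hidden states
+# are projected into the same shared embedding space as the text encoder above.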
+class MotionEncoderBiGRUCo(nn.Module):
+ def __init__(self, input_size, hidden_size, output_size, device):
+ super(MotionEncoderBiGRUCo, self).__init__()
+ self.device = device
+
+ self.input_emb = nn.Linear(input_size, hidden_size)
+ self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
+ self.output_net = nn.Sequential(
+ nn.Linear(hidden_size*2, hidden_size),
+ nn.LayerNorm(hidden_size),
+ nn.LeakyReLU(0.2, inplace=True),
+ nn.Linear(hidden_size, output_size)
+ )
+
+ self.input_emb.apply(init_weight)
+ self.output_net.apply(init_weight)
+ self.hidden_size = hidden_size
+ self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))
+
+ # input(batch_size, seq_len, dim)
+ def forward(self, inputs, m_lens):
+ num_samples = inputs.shape[0]
+
+ input_embs = self.input_emb(inputs)
+ hidden = self.hidden.repeat(1, num_samples, 1)
+
+ cap_lens = m_lens.data.tolist()
+ emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True)
+
+ gru_seq, gru_last = self.gru(emb, hidden)
+
+ gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1)
+
+ return self.output_net(gru_last)
+
+
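+# Predicts a distribution over motion-length bins (m_lens // unit_length) from the
+# text; trained with cross-entropy by LengthEstTrainer in trainers.py.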
+class MotionLenEstimatorBiGRU(nn.Module):
+ def __init__(self, word_size, pos_size, hidden_size, output_size):
+ super(MotionLenEstimatorBiGRU, self).__init__()
+
+ self.pos_emb = nn.Linear(pos_size, word_size)
+ self.input_emb = nn.Linear(word_size, hidden_size)
+ self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
+ nd = 512
+ self.output = nn.Sequential(
+ nn.Linear(hidden_size*2, nd),
+ nn.LayerNorm(nd),
+ nn.LeakyReLU(0.2, inplace=True),
+
+ nn.Linear(nd, nd // 2),
+ nn.LayerNorm(nd // 2),
+ nn.LeakyReLU(0.2, inplace=True),
+
+ nn.Linear(nd // 2, nd // 4),
+ nn.LayerNorm(nd // 4),
+ nn.LeakyReLU(0.2, inplace=True),
+
+ nn.Linear(nd // 4, output_size)
+ )
+ # self.linear2 = nn.Linear(hidden_size, output_size)
+
+ self.input_emb.apply(init_weight)
+ self.pos_emb.apply(init_weight)
+ self.output.apply(init_weight)
+ # self.linear2.apply(init_weight)
+ # self.batch_size = batch_size
+ self.hidden_size = hidden_size
+ self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))
+
+ # input(batch_size, seq_len, dim)
+ def forward(self, word_embs, pos_onehot, cap_lens):
+ num_samples = word_embs.shape[0]
+
+ pos_embs = self.pos_emb(pos_onehot)
+ inputs = word_embs + pos_embs
+ input_embs = self.input_emb(inputs)
+ hidden = self.hidden.repeat(1, num_samples, 1)
+
+ cap_lens = cap_lens.data.tolist()
+ emb = pack_padded_sequence(input_embs, cap_lens, batch_first=True)
+
+ gru_seq, gru_last = self.gru(emb, hidden)
+
+ gru_last = torch.cat([gru_last[0], gru_last[1]], dim=-1)
+
+ return self.output(gru_last)
diff --git a/data_loaders/custom/networks/trainers.py b/data_loaders/custom/networks/trainers.py
new file mode 100644
index 0000000..d4e2d6d
--- /dev/null
+++ b/data_loaders/custom/networks/trainers.py
@@ -0,0 +1,1089 @@
+import torch
+import torch.nn.functional as F
+import random
+from networks.modules import *
+from torch.utils.data import DataLoader
+import torch.optim as optim
+from torch.nn.utils import clip_grad_norm_
+# import tensorflow as tf  # NOTE: the Logger class below requires TensorFlow; uncomment this if the trainers here are run with is_train=True
+from collections import OrderedDict
+from utils.utils import *
+from os.path import join as pjoin
+from data.dataset import collate_fn
+import codecs as cs
+
+
+class Logger(object):
+ def __init__(self, log_dir):
+ self.writer = tf.summary.create_file_writer(log_dir)
+
+ def scalar_summary(self, tag, value, step):
+ with self.writer.as_default():
+ tf.summary.scalar(tag, value, step=step)
+ self.writer.flush()
+
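+# Trains the movement (snippet) autoencoder with an L1 reconstruction loss plus
+# latent sparsity and temporal smoothness penalties (lambda_sparsity / lambda_smooth).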
+class DecompTrainerV3(object):
+ def __init__(self, args, movement_enc, movement_dec):
+ self.opt = args
+ self.movement_enc = movement_enc
+ self.movement_dec = movement_dec
+ self.device = args.device
+
+ if args.is_train:
+ self.logger = Logger(args.log_dir)
+ self.sml1_criterion = torch.nn.SmoothL1Loss()
+ self.l1_criterion = torch.nn.L1Loss()
+ self.mse_criterion = torch.nn.MSELoss()
+
+
+ @staticmethod
+ def zero_grad(opt_list):
+ for opt in opt_list:
+ opt.zero_grad()
+
+ @staticmethod
+ def clip_norm(network_list):
+ for network in network_list:
+ clip_grad_norm_(network.parameters(), 0.5)
+
+ @staticmethod
+ def step(opt_list):
+ for opt in opt_list:
+ opt.step()
+
+ def forward(self, batch_data):
+ motions = batch_data
+ self.motions = motions.detach().to(self.device).float()
+ self.latents = self.movement_enc(self.motions[..., :-4])
+ self.recon_motions = self.movement_dec(self.latents)
+
+ def backward(self):
+ self.loss_rec = self.l1_criterion(self.recon_motions, self.motions)
+ # self.sml1_criterion(self.recon_motions[:, 1:] - self.recon_motions[:, :-1],
+ # self.motions[:, 1:] - self.recon_motions[:, :-1])
+ self.loss_sparsity = torch.mean(torch.abs(self.latents))
+ self.loss_smooth = self.l1_criterion(self.latents[:, 1:], self.latents[:, :-1])
+ self.loss = self.loss_rec + self.loss_sparsity * self.opt.lambda_sparsity +\
+ self.loss_smooth*self.opt.lambda_smooth
+
+ def update(self):
+ # time0 = time.time()
+ self.zero_grad([self.opt_movement_enc, self.opt_movement_dec])
+ # time1 = time.time()
+ # print('\t Zero_grad Time: %.5f s' % (time1 - time0))
+ self.backward()
+ # time2 = time.time()
+ # print('\t Backward Time: %.5f s' % (time2 - time1))
+ self.loss.backward()
+ # time3 = time.time()
+ # print('\t Loss backward Time: %.5f s' % (time3 - time2))
+ # self.clip_norm([self.movement_enc, self.movement_dec])
+ # time4 = time.time()
+ # print('\t Clip_norm Time: %.5f s' % (time4 - time3))
+ self.step([self.opt_movement_enc, self.opt_movement_dec])
+ # time5 = time.time()
+ # print('\t Step Time: %.5f s' % (time5 - time4))
+
+ loss_logs = OrderedDict({})
+ loss_logs['loss'] = self.loss_rec.item()
+ loss_logs['loss_rec'] = self.loss_rec.item()
+ loss_logs['loss_sparsity'] = self.loss_sparsity.item()
+ loss_logs['loss_smooth'] = self.loss_smooth.item()
+ return loss_logs
+
+ def save(self, file_name, ep, total_it):
+ state = {
+ 'movement_enc': self.movement_enc.state_dict(),
+ 'movement_dec': self.movement_dec.state_dict(),
+
+ 'opt_movement_enc': self.opt_movement_enc.state_dict(),
+ 'opt_movement_dec': self.opt_movement_dec.state_dict(),
+
+ 'ep': ep,
+ 'total_it': total_it,
+ }
+ torch.save(state, file_name)
+ return
+
+ def resume(self, model_dir):
+ checkpoint = torch.load(model_dir, map_location=self.device)
+
+ self.movement_dec.load_state_dict(checkpoint['movement_dec'])
+ self.movement_enc.load_state_dict(checkpoint['movement_enc'])
+
+ self.opt_movement_enc.load_state_dict(checkpoint['opt_movement_enc'])
+ self.opt_movement_dec.load_state_dict(checkpoint['opt_movement_dec'])
+
+ return checkpoint['ep'], checkpoint['total_it']
+
+ def train(self, train_dataloader, val_dataloader, plot_eval):
+ self.movement_enc.to(self.device)
+ self.movement_dec.to(self.device)
+
+ self.opt_movement_enc = optim.Adam(self.movement_enc.parameters(), lr=self.opt.lr)
+ self.opt_movement_dec = optim.Adam(self.movement_dec.parameters(), lr=self.opt.lr)
+
+ epoch = 0
+ it = 0
+
+ if self.opt.is_continue:
+ model_dir = pjoin(self.opt.model_dir, 'latest.tar')
+ epoch, it = self.resume(model_dir)
+
+ start_time = time.time()
+ total_iters = self.opt.max_epoch * len(train_dataloader)
+ print('Iters Per Epoch, Training: %04d, Validation: %03d' % (len(train_dataloader), len(val_dataloader)))
+ val_loss = 0
+ logs = OrderedDict()
+ while epoch < self.opt.max_epoch:
+ # time0 = time.time()
+ for i, batch_data in enumerate(train_dataloader):
+ self.movement_dec.train()
+ self.movement_enc.train()
+
+ # time1 = time.time()
+ # print('DataLoader Time: %.5f s'%(time1-time0) )
+ self.forward(batch_data)
+ # time2 = time.time()
+ # print('Forward Time: %.5f s'%(time2-time1))
+ log_dict = self.update()
+ # time3 = time.time()
+ # print('Update Time: %.5f s' % (time3 - time2))
+ # time0 = time3
+ for k, v in log_dict.items():
+ if k not in logs:
+ logs[k] = v
+ else:
+ logs[k] += v
+
+ it += 1
+ if it % self.opt.log_every == 0:
+ mean_loss = OrderedDict({'val_loss': val_loss})
+ self.logger.scalar_summary('val_loss', val_loss, it)
+
+ for tag, value in logs.items():
+ self.logger.scalar_summary(tag, value / self.opt.log_every, it)
+ mean_loss[tag] = value / self.opt.log_every
+ logs = OrderedDict()
+ print_current_loss_decomp(start_time, it, total_iters, mean_loss, epoch, i)
+
+ if it % self.opt.save_latest == 0:
+ self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)
+
+ self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)
+
+ epoch += 1
+ if epoch % self.opt.save_every_e == 0:
+ self.save(pjoin(self.opt.model_dir, 'E%04d.tar' % (epoch)), epoch, total_it=it)
+
+ print('Validation time:')
+
+ val_loss = 0
+ val_rec_loss = 0
+ val_sparcity_loss = 0
+ val_smooth_loss = 0
+ with torch.no_grad():
+ for i, batch_data in enumerate(val_dataloader):
+ self.forward(batch_data)
+ self.backward()
+ val_rec_loss += self.loss_rec.item()
+ val_sparcity_loss += self.loss_sparsity.item()
+ val_smooth_loss += self.loss_smooth.item()
+ val_loss += self.loss.item()
+
+ val_loss = val_loss / (len(val_dataloader) + 1)
+ val_rec_loss = val_rec_loss / (len(val_dataloader) + 1)
+ val_sparcity_loss = val_sparcity_loss / (len(val_dataloader) + 1)
+ val_smooth_loss = val_smooth_loss / (len(val_dataloader) + 1)
+ print('Validation Loss: %.5f Reconstruction Loss: %.5f '
+ 'Sparsity Loss: %.5f Smooth Loss: %.5f' % (val_loss, val_rec_loss, val_sparcity_loss, \
+ val_smooth_loss))
+
+ if epoch % self.opt.eval_every_e == 0:
+ data = torch.cat([self.recon_motions[:4], self.motions[:4]], dim=0).detach().cpu().numpy()
+ save_dir = pjoin(self.opt.eval_dir, 'E%04d' % (epoch))
+ os.makedirs(save_dir, exist_ok=True)
+ plot_eval(data, save_dir)
+
+
+# VAE Sequence Decoder/Prior/Posterior latent by latent
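+# Curriculum trainer for the T2M generator: teacher forcing with probability tf_ratio,
+# KL(posterior || prior) regularization, and a scheduled maximum length that grows by
+# one unit after each curriculum stage until it exceeds 49.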
+class CompTrainerV6(object):
+
+ def __init__(self, args, text_enc, seq_pri, seq_dec, att_layer, mov_dec, mov_enc=None, seq_post=None):
+ self.opt = args
+ self.text_enc = text_enc
+ self.seq_pri = seq_pri
+ self.att_layer = att_layer
+ self.device = args.device
+ self.seq_dec = seq_dec
+ self.mov_dec = mov_dec
+ self.mov_enc = mov_enc
+
+ if args.is_train:
+ self.seq_post = seq_post
+ # self.motion_dis
+ self.logger = Logger(args.log_dir)
+ self.l1_criterion = torch.nn.SmoothL1Loss()
+ self.gan_criterion = torch.nn.BCEWithLogitsLoss()
+ self.mse_criterion = torch.nn.MSELoss()
+
+ @staticmethod
+ def reparametrize(mu, logvar):
+ s_var = logvar.mul(0.5).exp_()
+ eps = s_var.data.new(s_var.size()).normal_()
+ return eps.mul(s_var).add_(mu)
+
+ @staticmethod
+ def ones_like(tensor, val=1.):
+ return torch.FloatTensor(tensor.size()).fill_(val).to(tensor.device).requires_grad_(False)
+
+ @staticmethod
+ def zeros_like(tensor, val=0.):
+ return torch.FloatTensor(tensor.size()).fill_(val).to(tensor.device).requires_grad_(False)
+
+ @staticmethod
+ def zero_grad(opt_list):
+ for opt in opt_list:
+ opt.zero_grad()
+
+ @staticmethod
+ def clip_norm(network_list):
+ for network in network_list:
+ clip_grad_norm_(network.parameters(), 0.5)
+
+ @staticmethod
+ def step(opt_list):
+ for opt in opt_list:
+ opt.step()
+
+ @staticmethod
+ def kl_criterion(mu1, logvar1, mu2, logvar2):
+ # KL( N(mu1, sigma2_1) || N(mu_2, sigma2_2))
+ # loss = log(sigma2/sigma1) + (sigma1^2 + (mu1 - mu2)^2)/(2*sigma2^2) - 1/2
+ sigma1 = logvar1.mul(0.5).exp()
+ sigma2 = logvar2.mul(0.5).exp()
+ kld = torch.log(sigma2 / sigma1) + (torch.exp(logvar1) + (mu1 - mu2) ** 2) / (
+ 2 * torch.exp(logvar2)) - 1 / 2
+ return kld.sum() / mu1.shape[0]
+
+ @staticmethod
+ def kl_criterion_unit(mu, logvar):
+ # KL( N(mu1, sigma2_1) || N(mu_2, sigma2_2))
+ # loss = log(sigma2/sigma1) + (sigma1^2 + (mu1 - mu2)^2)/(2*sigma2^2) - 1/2
+ kld = ((torch.exp(logvar) + mu ** 2) - logvar - 1) / 2
+ return kld.sum() / mu.shape[0]
+
+ def forward(self, batch_data, tf_ratio, mov_len, eval_mode=False):
+ word_emb, pos_ohot, caption, cap_lens, motions, m_lens = batch_data
+ word_emb = word_emb.detach().to(self.device).float()
+ pos_ohot = pos_ohot.detach().to(self.device).float()
+ motions = motions.detach().to(self.device).float()
+ self.cap_lens = cap_lens
+ self.caption = caption
+
+ # print(motions.shape)
+ # (batch_size, motion_len, pose_dim)
+ self.motions = motions
+
+ '''Movement Encoding'''
+ self.movements = self.mov_enc(self.motions[..., :-4]).detach()
+ # Initially input a mean vector
+ mov_in = self.mov_enc(
+ torch.zeros((self.motions.shape[0], self.opt.unit_length, self.motions.shape[-1] - 4), device=self.device)
+ ).squeeze(1).detach()
+ assert self.movements.shape[1] == mov_len
+
+        teacher_force = random.random() < tf_ratio
+
+ '''Text Encoding'''
+ # time0 = time.time()
+ # text_input = torch.cat([word_emb, pos_ohot], dim=-1)
+ word_hids, hidden = self.text_enc(word_emb, pos_ohot, cap_lens)
+ # print(word_hids.shape, hidden.shape)
+
+ if self.opt.text_enc_mod == 'bigru':
+ hidden_pos = self.seq_post.get_init_hidden(hidden)
+ hidden_pri = self.seq_pri.get_init_hidden(hidden)
+ hidden_dec = self.seq_dec.get_init_hidden(hidden)
+ elif self.opt.text_enc_mod == 'transformer':
+ hidden_pos = self.seq_post.get_init_hidden(hidden.detach())
+ hidden_pri = self.seq_pri.get_init_hidden(hidden.detach())
+ hidden_dec = self.seq_dec.get_init_hidden(hidden)
+
+ mus_pri = []
+ logvars_pri = []
+ mus_post = []
+ logvars_post = []
+ fake_mov_batch = []
+
+ query_input = []
+
+ # time1 = time.time()
+ # print("\t Text Encoder Cost:%5f" % (time1 - time0))
+ # print(self.movements.shape)
+
+ for i in range(mov_len):
+ # print("\t Sequence Measure")
+ # print(mov_in.shape)
+ mov_tgt = self.movements[:, i]
+ '''Local Attention Vector'''
+ att_vec, _ = self.att_layer(hidden_dec[-1], word_hids)
+ query_input.append(hidden_dec[-1])
+
+ tta = m_lens // self.opt.unit_length - i
+
+ if self.opt.text_enc_mod == 'bigru':
+ pos_in = torch.cat([mov_in, mov_tgt, att_vec], dim=-1)
+ pri_in = torch.cat([mov_in, att_vec], dim=-1)
+
+ elif self.opt.text_enc_mod == 'transformer':
+ pos_in = torch.cat([mov_in, mov_tgt, att_vec.detach()], dim=-1)
+ pri_in = torch.cat([mov_in, att_vec.detach()], dim=-1)
+
+ '''Posterior'''
+ z_pos, mu_pos, logvar_pos, hidden_pos = self.seq_post(pos_in, hidden_pos, tta)
+
+ '''Prior'''
+ z_pri, mu_pri, logvar_pri, hidden_pri = self.seq_pri(pri_in, hidden_pri, tta)
+
+ '''Decoder'''
+ if eval_mode:
+ dec_in = torch.cat([mov_in, att_vec, z_pri], dim=-1)
+ else:
+ dec_in = torch.cat([mov_in, att_vec, z_pos], dim=-1)
+ fake_mov, hidden_dec = self.seq_dec(dec_in, mov_in, hidden_dec, tta)
+
+ # print(fake_mov.shape)
+
+ mus_post.append(mu_pos)
+ logvars_post.append(logvar_pos)
+ mus_pri.append(mu_pri)
+ logvars_pri.append(logvar_pri)
+ fake_mov_batch.append(fake_mov.unsqueeze(1))
+
+ if teacher_force:
+ mov_in = self.movements[:, i].detach()
+ else:
+ mov_in = fake_mov.detach()
+
+
+ self.fake_movements = torch.cat(fake_mov_batch, dim=1)
+
+ # print(self.fake_movements.shape)
+
+ self.fake_motions = self.mov_dec(self.fake_movements)
+
+ self.mus_post = torch.cat(mus_post, dim=0)
+ self.mus_pri = torch.cat(mus_pri, dim=0)
+ self.logvars_post = torch.cat(logvars_post, dim=0)
+ self.logvars_pri = torch.cat(logvars_pri, dim=0)
+
+ def generate(self, word_emb, pos_ohot, cap_lens, m_lens, mov_len, dim_pose):
+ word_emb = word_emb.detach().to(self.device).float()
+ pos_ohot = pos_ohot.detach().to(self.device).float()
+ self.cap_lens = cap_lens
+
+ # print(motions.shape)
+ # (batch_size, motion_len, pose_dim)
+
+ '''Movement Encoding'''
+ # Initially input a mean vector
+ mov_in = self.mov_enc(
+ torch.zeros((word_emb.shape[0], self.opt.unit_length, dim_pose - 4), device=self.device)
+ ).squeeze(1).detach()
+
+ '''Text Encoding'''
+ # time0 = time.time()
+ # text_input = torch.cat([word_emb, pos_ohot], dim=-1)
+ word_hids, hidden = self.text_enc(word_emb, pos_ohot, cap_lens)
+ # print(word_hids.shape, hidden.shape)
+
+ hidden_pri = self.seq_pri.get_init_hidden(hidden)
+ hidden_dec = self.seq_dec.get_init_hidden(hidden)
+
+ mus_pri = []
+ logvars_pri = []
+ fake_mov_batch = []
+ att_wgt = []
+
+ # time1 = time.time()
+ # print("\t Text Encoder Cost:%5f" % (time1 - time0))
+ # print(self.movements.shape)
+
+ for i in range(mov_len):
+ # print("\t Sequence Measure")
+ # print(mov_in.shape)
+ '''Local Attention Vector'''
+ att_vec, co_weights = self.att_layer(hidden_dec[-1], word_hids)
+
+ tta = m_lens // self.opt.unit_length - i
+ # tta = m_lens - i
+
+ '''Prior'''
+ pri_in = torch.cat([mov_in, att_vec], dim=-1)
+ z_pri, mu_pri, logvar_pri, hidden_pri = self.seq_pri(pri_in, hidden_pri, tta)
+
+ '''Decoder'''
+ dec_in = torch.cat([mov_in, att_vec, z_pri], dim=-1)
+
+ fake_mov, hidden_dec = self.seq_dec(dec_in, mov_in, hidden_dec, tta)
+
+ # print(fake_mov.shape)
+ mus_pri.append(mu_pri)
+ logvars_pri.append(logvar_pri)
+ fake_mov_batch.append(fake_mov.unsqueeze(1))
+ att_wgt.append(co_weights)
+
+ mov_in = fake_mov.detach()
+
+ fake_movements = torch.cat(fake_mov_batch, dim=1)
+ att_wgts = torch.cat(att_wgt, dim=-1)
+
+ # print(self.fake_movements.shape)
+
+ fake_motions = self.mov_dec(fake_movements)
+
+ mus_pri = torch.cat(mus_pri, dim=0)
+ logvars_pri = torch.cat(logvars_pri, dim=0)
+
+ return fake_motions, mus_pri, att_wgts
+
+ def backward_G(self):
+ self.loss_mot_rec = self.l1_criterion(self.fake_motions, self.motions)
+ self.loss_mov_rec = self.l1_criterion(self.fake_movements, self.movements)
+
+ self.loss_kld = self.kl_criterion(self.mus_post, self.logvars_post, self.mus_pri, self.logvars_pri)
+
+ self.loss_gen = self.loss_mot_rec * self.opt.lambda_rec_mov + self.loss_mov_rec * self.opt.lambda_rec_mot + \
+ self.loss_kld * self.opt.lambda_kld
+ loss_logs = OrderedDict({})
+ loss_logs['loss_gen'] = self.loss_gen.item()
+ loss_logs['loss_mot_rec'] = self.loss_mot_rec.item()
+ loss_logs['loss_mov_rec'] = self.loss_mov_rec.item()
+ loss_logs['loss_kld'] = self.loss_kld.item()
+
+ return loss_logs
+ # self.loss_gen = self.loss_rec_mov
+
+ # self.loss_gen = self.loss_rec_mov * self.opt.lambda_rec_mov + self.loss_rec_mot + \
+ # self.loss_kld * self.opt.lambda_kld + \
+ # self.loss_mtgan_G * self.opt.lambda_gan_mt + self.loss_mvgan_G * self.opt.lambda_gan_mv
+
+
+ def update(self):
+
+ self.zero_grad([self.opt_text_enc, self.opt_seq_dec, self.opt_seq_post,
+ self.opt_seq_pri, self.opt_att_layer, self.opt_mov_dec])
+ # time2_0 = time.time()
+ # print("\t\t Zero Grad:%5f" % (time2_0 - time1))
+ loss_logs = self.backward_G()
+
+ # time2_1 = time.time()
+ # print("\t\t Backward_G :%5f" % (time2_1 - time2_0))
+ self.loss_gen.backward()
+
+ # time2_2 = time.time()
+ # print("\t\t Backward :%5f" % (time2_2 - time2_1))
+ self.clip_norm([self.text_enc, self.seq_dec, self.seq_post, self.seq_pri,
+ self.att_layer, self.mov_dec])
+
+ # time2_3 = time.time()
+ # print("\t\t Clip Norm :%5f" % (time2_3 - time2_2))
+ self.step([self.opt_text_enc, self.opt_seq_dec, self.opt_seq_post,
+ self.opt_seq_pri, self.opt_att_layer, self.opt_mov_dec])
+
+ # time2_4 = time.time()
+ # print("\t\t Step :%5f" % (time2_4 - time2_3))
+
+ # time2 = time.time()
+ # print("\t Update Generator Cost:%5f" % (time2 - time1))
+
+ # self.zero_grad([self.opt_att_layer])
+ # self.backward_Att()
+ # self.loss_lgan_G_.backward()
+ # self.clip_norm([self.att_layer])
+ # self.step([self.opt_att_layer])
+ # # time3 = time.time()
+ # # print("\t Update Att Cost:%5f" % (time3 - time2))
+
+ # self.loss_gen += self.loss_lgan_G_
+
+ return loss_logs
+
+ def to(self, device):
+ if self.opt.is_train:
+ self.gan_criterion.to(device)
+ self.mse_criterion.to(device)
+ self.l1_criterion.to(device)
+ self.seq_post.to(device)
+ self.mov_enc.to(device)
+ self.text_enc.to(device)
+ self.mov_dec.to(device)
+ self.seq_pri.to(device)
+ self.att_layer.to(device)
+ self.seq_dec.to(device)
+
+ def train_mode(self):
+ if self.opt.is_train:
+ self.seq_post.train()
+ self.mov_enc.eval()
+ # self.motion_dis.train()
+ # self.movement_dis.train()
+ self.mov_dec.train()
+ self.text_enc.train()
+ self.seq_pri.train()
+ self.att_layer.train()
+ self.seq_dec.train()
+
+
+ def eval_mode(self):
+ if self.opt.is_train:
+ self.seq_post.eval()
+ self.mov_enc.eval()
+ # self.motion_dis.train()
+ # self.movement_dis.train()
+ self.mov_dec.eval()
+ self.text_enc.eval()
+ self.seq_pri.eval()
+ self.att_layer.eval()
+ self.seq_dec.eval()
+
+
+ def save(self, file_name, ep, total_it, sub_ep, sl_len):
+ state = {
+ # 'latent_dis': self.latent_dis.state_dict(),
+ # 'motion_dis': self.motion_dis.state_dict(),
+ 'text_enc': self.text_enc.state_dict(),
+ 'seq_post': self.seq_post.state_dict(),
+ 'att_layer': self.att_layer.state_dict(),
+ 'seq_dec': self.seq_dec.state_dict(),
+ 'seq_pri': self.seq_pri.state_dict(),
+ 'mov_enc': self.mov_enc.state_dict(),
+ 'mov_dec': self.mov_dec.state_dict(),
+
+ # 'opt_motion_dis': self.opt_motion_dis.state_dict(),
+ 'opt_mov_dec': self.opt_mov_dec.state_dict(),
+ 'opt_text_enc': self.opt_text_enc.state_dict(),
+ 'opt_seq_pri': self.opt_seq_pri.state_dict(),
+ 'opt_att_layer': self.opt_att_layer.state_dict(),
+ 'opt_seq_post': self.opt_seq_post.state_dict(),
+ 'opt_seq_dec': self.opt_seq_dec.state_dict(),
+ # 'opt_movement_dis': self.opt_movement_dis.state_dict(),
+
+ 'ep': ep,
+ 'total_it': total_it,
+ 'sub_ep': sub_ep,
+ 'sl_len': sl_len
+ }
+ torch.save(state, file_name)
+ return
+
+ def load(self, model_dir):
+ checkpoint = torch.load(model_dir, map_location=self.device)
+ if self.opt.is_train:
+ self.seq_post.load_state_dict(checkpoint['seq_post'])
+ # self.opt_latent_dis.load_state_dict(checkpoint['opt_latent_dis'])
+
+ self.opt_text_enc.load_state_dict(checkpoint['opt_text_enc'])
+ self.opt_seq_post.load_state_dict(checkpoint['opt_seq_post'])
+ self.opt_att_layer.load_state_dict(checkpoint['opt_att_layer'])
+ self.opt_seq_pri.load_state_dict(checkpoint['opt_seq_pri'])
+ self.opt_seq_dec.load_state_dict(checkpoint['opt_seq_dec'])
+ self.opt_mov_dec.load_state_dict(checkpoint['opt_mov_dec'])
+
+ self.text_enc.load_state_dict(checkpoint['text_enc'])
+ self.mov_dec.load_state_dict(checkpoint['mov_dec'])
+ self.seq_pri.load_state_dict(checkpoint['seq_pri'])
+ self.att_layer.load_state_dict(checkpoint['att_layer'])
+ self.seq_dec.load_state_dict(checkpoint['seq_dec'])
+ self.mov_enc.load_state_dict(checkpoint['mov_enc'])
+
+ return checkpoint['ep'], checkpoint['total_it'], checkpoint['sub_ep'], checkpoint['sl_len']
+
+ def train(self, train_dataset, val_dataset, plot_eval):
+ self.to(self.device)
+
+ self.opt_text_enc = optim.Adam(self.text_enc.parameters(), lr=self.opt.lr)
+ self.opt_seq_post = optim.Adam(self.seq_post.parameters(), lr=self.opt.lr)
+ self.opt_seq_pri = optim.Adam(self.seq_pri.parameters(), lr=self.opt.lr)
+ self.opt_att_layer = optim.Adam(self.att_layer.parameters(), lr=self.opt.lr)
+ self.opt_seq_dec = optim.Adam(self.seq_dec.parameters(), lr=self.opt.lr)
+
+ self.opt_mov_dec = optim.Adam(self.mov_dec.parameters(), lr=self.opt.lr*0.1)
+
+ epoch = 0
+ it = 0
+ if self.opt.dataset_name == 't2m':
+ schedule_len = 10
+ elif self.opt.dataset_name == 'kit':
+ schedule_len = 6
+ sub_ep = 0
+
+ if self.opt.is_continue:
+ model_dir = pjoin(self.opt.model_dir, 'latest.tar')
+ epoch, it, sub_ep, schedule_len = self.load(model_dir)
+
+ invalid = True
+ start_time = time.time()
+ val_loss = 0
+ is_continue_and_first = self.opt.is_continue
+ while invalid:
+ train_dataset.reset_max_len(schedule_len * self.opt.unit_length)
+ val_dataset.reset_max_len(schedule_len * self.opt.unit_length)
+
+ train_loader = DataLoader(train_dataset, batch_size=self.opt.batch_size, drop_last=True, num_workers=4,
+ shuffle=True, collate_fn=collate_fn, pin_memory=True)
+ val_loader = DataLoader(val_dataset, batch_size=self.opt.batch_size, drop_last=True, num_workers=4,
+ shuffle=True, collate_fn=collate_fn, pin_memory=True)
+ print("Max_Length:%03d Training Split:%05d Validation Split:%04d" % (schedule_len, len(train_loader), len(val_loader)))
+
+ min_val_loss = np.inf
+ stop_cnt = 0
+ logs = OrderedDict()
+ for sub_epoch in range(sub_ep, self.opt.max_sub_epoch):
+ self.train_mode()
+
+ if is_continue_and_first:
+ sub_ep = 0
+ is_continue_and_first = False
+
+ tf_ratio = self.opt.tf_ratio
+
+ time1 = time.time()
+ for i, batch_data in enumerate(train_loader):
+ time2 = time.time()
+ self.forward(batch_data, tf_ratio, schedule_len)
+ time3 = time.time()
+ log_dict = self.update()
+ for k, v in log_dict.items():
+ if k not in logs:
+ logs[k] = v
+ else:
+ logs[k] += v
+ time4 = time.time()
+
+
+ it += 1
+ if it % self.opt.log_every == 0:
+ mean_loss = OrderedDict({'val_loss': val_loss})
+ self.logger.scalar_summary('val_loss', val_loss, it)
+ self.logger.scalar_summary('scheduled_length', schedule_len, it)
+
+ for tag, value in logs.items():
+ self.logger.scalar_summary(tag, value/self.opt.log_every, it)
+ mean_loss[tag] = value / self.opt.log_every
+ logs = OrderedDict()
+ print_current_loss(start_time, it, mean_loss, epoch, sub_epoch=sub_epoch, inner_iter=i,
+ tf_ratio=tf_ratio, sl_steps=schedule_len)
+
+ if it % self.opt.save_latest == 0:
+ self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it, sub_epoch, schedule_len)
+
+ time5 = time.time()
+ # print("Data Loader Time: %5f s" % ((time2 - time1)))
+ # print("Forward Time: %5f s" % ((time3 - time2)))
+ # print("Update Time: %5f s" % ((time4 - time3)))
+ # print('Per Iteration: %5f s' % ((time5 - time1)))
+ time1 = time5
+
+ self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it, sub_epoch, schedule_len)
+
+ epoch += 1
+ if epoch % self.opt.save_every_e == 0:
+ self.save(pjoin(self.opt.model_dir, 'E%03d_SE%02d_SL%02d.tar'%(epoch, sub_epoch, schedule_len)),
+ epoch, total_it=it, sub_ep=sub_epoch, sl_len=schedule_len)
+
+ print('Validation time:')
+
+ loss_mot_rec = 0
+ loss_mov_rec = 0
+ loss_kld = 0
+ val_loss = 0
+ with torch.no_grad():
+ for i, batch_data in enumerate(val_loader):
+ self.forward(batch_data, 0, schedule_len)
+ self.backward_G()
+ loss_mot_rec += self.loss_mot_rec.item()
+ loss_mov_rec += self.loss_mov_rec.item()
+ loss_kld += self.loss_kld.item()
+ val_loss += self.loss_gen.item()
+
+ loss_mot_rec /= len(val_loader) + 1
+ loss_mov_rec /= len(val_loader) + 1
+ loss_kld /= len(val_loader) + 1
+ val_loss /= len(val_loader) + 1
+ print('Validation Loss: %.5f Movement Recon Loss: %.5f Motion Recon Loss: %.5f KLD Loss: %.5f:' %
+ (val_loss, loss_mov_rec, loss_mot_rec, loss_kld))
+
+ if epoch % self.opt.eval_every_e == 0:
+ reco_data = self.fake_motions[:4]
+ with torch.no_grad():
+ self.forward(batch_data, 0, schedule_len, eval_mode=True)
+ fake_data = self.fake_motions[:4]
+ gt_data = self.motions[:4]
+ data = torch.cat([fake_data, reco_data, gt_data], dim=0).cpu().numpy()
+ captions = self.caption[:4] * 3
+ save_dir = pjoin(self.opt.eval_dir, 'E%03d_SE%02d_SL%02d'%(epoch, sub_epoch, schedule_len))
+ os.makedirs(save_dir, exist_ok=True)
+ plot_eval(data, save_dir, captions)
+
+ # if cl_ratio == 1:
+ if val_loss < min_val_loss:
+ min_val_loss = val_loss
+ stop_cnt = 0
+ elif stop_cnt < self.opt.early_stop_count:
+ stop_cnt += 1
+ elif stop_cnt >= self.opt.early_stop_count:
+ break
+ if val_loss - min_val_loss >= 0.1:
+ break
+
+ schedule_len += 1
+
+ if schedule_len > 49:
+ invalid = False
+
+
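+# Trains MotionLenEstimatorBiGRU as a classifier over length bins; the checkpoint with
+# the lowest validation loss is saved as 'finest.tar'.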
+class LengthEstTrainer(object):
+
+ def __init__(self, args, estimator):
+ self.opt = args
+ self.estimator = estimator
+ self.device = args.device
+
+ if args.is_train:
+ # self.motion_dis
+ self.logger = Logger(args.log_dir)
+ self.mul_cls_criterion = torch.nn.CrossEntropyLoss()
+
+ def resume(self, model_dir):
+ checkpoints = torch.load(model_dir, map_location=self.device)
+ self.estimator.load_state_dict(checkpoints['estimator'])
+ self.opt_estimator.load_state_dict(checkpoints['opt_estimator'])
+ return checkpoints['epoch'], checkpoints['iter']
+
+ def save(self, model_dir, epoch, niter):
+ state = {
+ 'estimator': self.estimator.state_dict(),
+ 'opt_estimator': self.opt_estimator.state_dict(),
+ 'epoch': epoch,
+ 'niter': niter,
+ }
+ torch.save(state, model_dir)
+
+ @staticmethod
+ def zero_grad(opt_list):
+ for opt in opt_list:
+ opt.zero_grad()
+
+ @staticmethod
+ def clip_norm(network_list):
+ for network in network_list:
+ clip_grad_norm_(network.parameters(), 0.5)
+
+ @staticmethod
+ def step(opt_list):
+ for opt in opt_list:
+ opt.step()
+
+ def train(self, train_dataloader, val_dataloader):
+ self.estimator.to(self.device)
+
+ self.opt_estimator = optim.Adam(self.estimator.parameters(), lr=self.opt.lr)
+
+ epoch = 0
+ it = 0
+
+ if self.opt.is_continue:
+ model_dir = pjoin(self.opt.model_dir, 'latest.tar')
+ epoch, it = self.resume(model_dir)
+
+ start_time = time.time()
+ total_iters = self.opt.max_epoch * len(train_dataloader)
+ print('Iters Per Epoch, Training: %04d, Validation: %03d' % (len(train_dataloader), len(val_dataloader)))
+ val_loss = 0
+ min_val_loss = np.inf
+ logs = OrderedDict({'loss': 0})
+ while epoch < self.opt.max_epoch:
+ # time0 = time.time()
+ for i, batch_data in enumerate(train_dataloader):
+ self.estimator.train()
+
+ word_emb, pos_ohot, _, cap_lens, _, m_lens = batch_data
+ word_emb = word_emb.detach().to(self.device).float()
+ pos_ohot = pos_ohot.detach().to(self.device).float()
+
+ pred_dis = self.estimator(word_emb, pos_ohot, cap_lens)
+
+ self.zero_grad([self.opt_estimator])
+
+ gt_labels = m_lens // self.opt.unit_length
+ gt_labels = gt_labels.long().to(self.device)
+ # print(gt_labels)
+ # print(pred_dis)
+ loss = self.mul_cls_criterion(pred_dis, gt_labels)
+
+ loss.backward()
+
+ self.clip_norm([self.estimator])
+ self.step([self.opt_estimator])
+
+ logs['loss'] += loss.item()
+
+ it += 1
+ if it % self.opt.log_every == 0:
+ mean_loss = OrderedDict({'val_loss': val_loss})
+ self.logger.scalar_summary('val_loss', val_loss, it)
+
+ for tag, value in logs.items():
+ self.logger.scalar_summary(tag, value / self.opt.log_every, it)
+ mean_loss[tag] = value / self.opt.log_every
+ logs = OrderedDict({'loss': 0})
+ print_current_loss_decomp(start_time, it, total_iters, mean_loss, epoch, i)
+
+ if it % self.opt.save_latest == 0:
+ self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)
+
+ self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)
+
+ epoch += 1
+ if epoch % self.opt.save_every_e == 0:
+ self.save(pjoin(self.opt.model_dir, 'E%04d.tar' % (epoch)), epoch, it)
+
+ print('Validation time:')
+
+ val_loss = 0
+ with torch.no_grad():
+ for i, batch_data in enumerate(val_dataloader):
+ word_emb, pos_ohot, _, cap_lens, _, m_lens = batch_data
+ word_emb = word_emb.detach().to(self.device).float()
+ pos_ohot = pos_ohot.detach().to(self.device).float()
+
+ pred_dis = self.estimator(word_emb, pos_ohot, cap_lens)
+
+ gt_labels = m_lens // self.opt.unit_length
+ gt_labels = gt_labels.long().to(self.device)
+ loss = self.mul_cls_criterion(pred_dis, gt_labels)
+
+ val_loss += loss.item()
+
+ val_loss = val_loss / (len(val_dataloader) + 1)
+ print('Validation Loss: %.5f' % (val_loss))
+
+ if val_loss < min_val_loss:
+ self.save(pjoin(self.opt.model_dir, 'finest.tar'), epoch, it)
+ min_val_loss = val_loss
+
+
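+# Contrastive training of the text and motion encoders: matched pairs use label 0,
+# index-shifted mismatched pairs use label 1, with the margin-based ContrastiveLoss.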
+class TextMotionMatchTrainer(object):
+
+ def __init__(self, args, text_encoder, motion_encoder, movement_encoder):
+ self.opt = args
+ self.text_encoder = text_encoder
+ self.motion_encoder = motion_encoder
+ self.movement_encoder = movement_encoder
+ self.device = args.device
+
+ if args.is_train:
+ # self.motion_dis
+ self.logger = Logger(args.log_dir)
+ self.contrastive_loss = ContrastiveLoss(self.opt.negative_margin)
+
+ def resume(self, model_dir):
+ checkpoints = torch.load(model_dir, map_location=self.device)
+ self.text_encoder.load_state_dict(checkpoints['text_encoder'])
+ self.motion_encoder.load_state_dict(checkpoints['motion_encoder'])
+ self.movement_encoder.load_state_dict(checkpoints['movement_encoder'])
+
+ self.opt_text_encoder.load_state_dict(checkpoints['opt_text_encoder'])
+ self.opt_motion_encoder.load_state_dict(checkpoints['opt_motion_encoder'])
+ return checkpoints['epoch'], checkpoints['iter']
+
+ def save(self, model_dir, epoch, niter):
+ state = {
+ 'text_encoder': self.text_encoder.state_dict(),
+ 'motion_encoder': self.motion_encoder.state_dict(),
+ 'movement_encoder': self.movement_encoder.state_dict(),
+
+ 'opt_text_encoder': self.opt_text_encoder.state_dict(),
+ 'opt_motion_encoder': self.opt_motion_encoder.state_dict(),
+ 'epoch': epoch,
+ 'iter': niter,
+ }
+ torch.save(state, model_dir)
+
+ @staticmethod
+ def zero_grad(opt_list):
+ for opt in opt_list:
+ opt.zero_grad()
+
+ @staticmethod
+ def clip_norm(network_list):
+ for network in network_list:
+ clip_grad_norm_(network.parameters(), 0.5)
+
+ @staticmethod
+ def step(opt_list):
+ for opt in opt_list:
+ opt.step()
+
+ def to(self, device):
+ self.text_encoder.to(device)
+ self.motion_encoder.to(device)
+ self.movement_encoder.to(device)
+
+ def train_mode(self):
+ self.text_encoder.train()
+ self.motion_encoder.train()
+ self.movement_encoder.eval()
+
+ def forward(self, batch_data):
+ word_emb, pos_ohot, caption, cap_lens, motions, m_lens, _ = batch_data
+ word_emb = word_emb.detach().to(self.device).float()
+ pos_ohot = pos_ohot.detach().to(self.device).float()
+ motions = motions.detach().to(self.device).float()
+
+        # Sort motions by length in descending order (the text lengths are already sorted)
+ self.align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()
+ # print(self.align_idx)
+ # print(m_lens[self.align_idx])
+ motions = motions[self.align_idx]
+ m_lens = m_lens[self.align_idx]
+
+ '''Movement Encoding'''
+ movements = self.movement_encoder(motions[..., :-4]).detach()
+ m_lens = m_lens // self.opt.unit_length
+ self.motion_embedding = self.motion_encoder(movements, m_lens)
+
+ '''Text Encoding'''
+ # time0 = time.time()
+ # text_input = torch.cat([word_emb, pos_ohot], dim=-1)
+ self.text_embedding = self.text_encoder(word_emb, pos_ohot, cap_lens)
+ self.text_embedding = self.text_embedding.clone()[self.align_idx]
+
+
+ def backward(self):
+
+ batch_size = self.text_embedding.shape[0]
+ '''Positive pairs'''
+ pos_labels = torch.zeros(batch_size).to(self.text_embedding.device)
+ self.loss_pos = self.contrastive_loss(self.text_embedding, self.motion_embedding, pos_labels)
+
+ '''Negative Pairs, shifting index'''
+ neg_labels = torch.ones(batch_size).to(self.text_embedding.device)
+ shift = np.random.randint(0, batch_size-1)
+ new_idx = np.arange(shift, batch_size + shift) % batch_size
+ self.mis_motion_embedding = self.motion_embedding.clone()[new_idx]
+ self.loss_neg = self.contrastive_loss(self.text_embedding, self.mis_motion_embedding, neg_labels)
+ self.loss = self.loss_pos + self.loss_neg
+
+ loss_logs = OrderedDict({})
+ loss_logs['loss'] = self.loss.item()
+ loss_logs['loss_pos'] = self.loss_pos.item()
+ loss_logs['loss_neg'] = self.loss_neg.item()
+ return loss_logs
+
+
+ def update(self):
+
+ self.zero_grad([self.opt_motion_encoder, self.opt_text_encoder])
+ loss_logs = self.backward()
+ self.loss.backward()
+ self.clip_norm([self.text_encoder, self.motion_encoder])
+ self.step([self.opt_text_encoder, self.opt_motion_encoder])
+
+ return loss_logs
+
+
+ def train(self, train_dataloader, val_dataloader):
+ self.to(self.device)
+
+ self.opt_motion_encoder = optim.Adam(self.motion_encoder.parameters(), lr=self.opt.lr)
+ self.opt_text_encoder = optim.Adam(self.text_encoder.parameters(), lr=self.opt.lr)
+
+ epoch = 0
+ it = 0
+
+ if self.opt.is_continue:
+ model_dir = pjoin(self.opt.model_dir, 'latest.tar')
+ epoch, it = self.resume(model_dir)
+
+ start_time = time.time()
+ total_iters = self.opt.max_epoch * len(train_dataloader)
+ print('Iters Per Epoch, Training: %04d, Validation: %03d' % (len(train_dataloader), len(val_dataloader)))
+ val_loss = 0
+ logs = OrderedDict()
+
+ min_val_loss = np.inf
+ while epoch < self.opt.max_epoch:
+ # time0 = time.time()
+ for i, batch_data in enumerate(train_dataloader):
+ self.train_mode()
+
+ self.forward(batch_data)
+ # time3 = time.time()
+ log_dict = self.update()
+ for k, v in log_dict.items():
+ if k not in logs:
+ logs[k] = v
+ else:
+ logs[k] += v
+
+
+ it += 1
+ if it % self.opt.log_every == 0:
+ mean_loss = OrderedDict({'val_loss': val_loss})
+ self.logger.scalar_summary('val_loss', val_loss, it)
+
+ for tag, value in logs.items():
+ self.logger.scalar_summary(tag, value / self.opt.log_every, it)
+ mean_loss[tag] = value / self.opt.log_every
+ logs = OrderedDict()
+ print_current_loss_decomp(start_time, it, total_iters, mean_loss, epoch, i)
+
+ if it % self.opt.save_latest == 0:
+ self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)
+
+ self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)
+
+ epoch += 1
+ if epoch % self.opt.save_every_e == 0:
+ self.save(pjoin(self.opt.model_dir, 'E%04d.tar' % (epoch)), epoch, it)
+
+ print('Validation time:')
+
+ loss_pos_pair = 0
+ loss_neg_pair = 0
+ val_loss = 0
+ with torch.no_grad():
+ for i, batch_data in enumerate(val_dataloader):
+ self.forward(batch_data)
+ self.backward()
+ loss_pos_pair += self.loss_pos.item()
+ loss_neg_pair += self.loss_neg.item()
+ val_loss += self.loss.item()
+
+ loss_pos_pair /= len(val_dataloader) + 1
+ loss_neg_pair /= len(val_dataloader) + 1
+ val_loss /= len(val_dataloader) + 1
+ print('Validation Loss: %.5f Positive Loss: %.5f Negative Loss: %.5f' %
+ (val_loss, loss_pos_pair, loss_neg_pair))
+
+ if val_loss < min_val_loss:
+ self.save(pjoin(self.opt.model_dir, 'finest.tar'), epoch, it)
+ min_val_loss = val_loss
+
+ if epoch % self.opt.eval_every_e == 0:
+ pos_dist = F.pairwise_distance(self.text_embedding, self.motion_embedding)
+ neg_dist = F.pairwise_distance(self.text_embedding, self.mis_motion_embedding)
+
+ pos_str = ' '.join(['%.3f' % (pos_dist[i]) for i in range(pos_dist.shape[0])])
+ neg_str = ' '.join(['%.3f' % (neg_dist[i]) for i in range(neg_dist.shape[0])])
+
+ save_path = pjoin(self.opt.eval_dir, 'E%03d.txt' % (epoch))
+ with cs.open(save_path, 'w') as f:
+ f.write('Positive Pairs Distance\n')
+ f.write(pos_str + '\n')
+ f.write('Negative Pairs Distance\n')
+ f.write(neg_str + '\n')
diff --git a/data_loaders/custom/precalculate b/data_loaders/custom/precalculate
new file mode 100755
index 0000000..f5af59b
--- /dev/null
+++ b/data_loaders/custom/precalculate
@@ -0,0 +1,32 @@
+#! /usr/bin/env bash
+
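+# Usage: ./precalculate <rig_file>
+# The rig file is expected to contain ROOT/JOINT hierarchy lines (e.g. a BVH skeleton);
+# the script prints the joint count, the parameter count and a RIG_JOINT_NAMES listing.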
+FILE="$1"
+
+joint_names=()
+
+# add root
+joint_names=("${joint_names[@]}" "\"$(grep ROOT ${FILE} | sed 's/ROOT //;s/\t//g')\",")
+
+# add all joints in order
+for joint in $(grep JOINT ${FILE} | sed 's/JOINT //;s/\t//g'); do
+ joint_names+=( "\"${joint}\"," ); # FIXME: hacky method to include quotes and comma
+done
+
+# remove comma from last joint
+last_joint=${joint_names[-1]}
+joint_names[-1]=${last_joint%,*}
+
+# convenience computations
+num_joints=${#joint_names[@]}
+num_params=$(( ${num_joints} * 12 - 1 ))
+
+# write outputs to file
+echo "From ${FILE}:"
+echo "Joints = ${num_joints}"
+echo "Params = ${num_params} (12 * Joints - 1)"
+
+echo
+echo "Use the following as RIG_JOINT_NAMES:"
+echo "RIG_JOINT_NAMES = ["
+for joint in "${joint_names[@]}"; do echo ${joint}; done
+echo "]"
diff --git a/data_loaders/custom/scripts/motion_process.py b/data_loaders/custom/scripts/motion_process.py
new file mode 100644
index 0000000..8b7eb6c
--- /dev/null
+++ b/data_loaders/custom/scripts/motion_process.py
@@ -0,0 +1,586 @@
+from os.path import join as pjoin
+
+from data_loaders.custom.common.skeleton import Skeleton
+import numpy as np
+import os
+from data_loaders.custom.common.quaternion import *
+from data_loaders.custom.utils.paramUtil import *
+
+import torch
+from tqdm import tqdm
+
+
+# Lower legs
+l_idx1, l_idx2 = 6, 1
+# Right/Left foot
+fid_r, fid_l = [9, 10], [4, 5]
+# Face direction, r_hip, l_hip, sdr_r, sdr_l
+face_joint_indx = [6, 1, 23, 18]
+# l_hip, r_hip
+r_hip, l_hip = 6, 1
+joints_num = 27  # NOTE: defined manually here because get_opt is not used
+
+# positions (batch, joints_num, 3)
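+# Retargets a joint-position sequence onto the target skeleton: scale the root
+# trajectory by the leg-length ratio, recover joint rotations with inverse kinematics,
+# then re-pose the target offsets with forward kinematics.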
+def uniform_skeleton(positions, target_offset, n_raw_offsets, kinematic_chain):
+ src_skel = Skeleton(n_raw_offsets, kinematic_chain, 'cpu')
+ src_offset = src_skel.get_offsets_joints(torch.from_numpy(positions[0]))
+ src_offset = src_offset.numpy()
+ tgt_offset = target_offset.numpy()
+ # print(src_offset)
+ # print(tgt_offset)
+ '''Calculate Scale Ratio as the ratio of legs'''
+ src_leg_len = np.abs(src_offset[l_idx1]).max() + np.abs(src_offset[l_idx2]).max()
+ tgt_leg_len = np.abs(tgt_offset[l_idx1]).max() + np.abs(tgt_offset[l_idx2]).max()
+
+ scale_rt = tgt_leg_len / src_leg_len
+ # print(scale_rt)
+ src_root_pos = positions[:, 0]
+ tgt_root_pos = src_root_pos * scale_rt
+
+ '''Inverse Kinematics'''
+ quat_params = src_skel.inverse_kinematics_np(positions, face_joint_indx)
+ # print(quat_params.shape)
+
+ '''Forward Kinematics'''
+ src_skel.set_offset(target_offset)
+ new_joints = src_skel.forward_kinematics_np(quat_params, tgt_root_pos)
+ return new_joints
+
+
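+# Extracts the model's feature representation from a joint-position sequence:
+# binary foot contacts, continuous-6D joint rotations, root-relative (RIFKE) joint
+# positions, and root height / angular / linear velocity terms.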
+def extract_features(positions, feet_thre, n_raw_offsets, kinematic_chain, face_joint_indx, fid_r, fid_l):
+ global_positions = positions.copy()
+ """ Get Foot Contacts """
+
+ # import matplotlib.pyplot as plt
+ # plt.scatter(global_positions[:, 0, 0], global_positions[:, 0, 2], marker='*')
+ # # plt.plot(positions[:, 0, 0], positions[:, 0, 2], marker='o', color='r')
+ # plt.xlabel('x')
+ # plt.ylabel('z')
+ # plt.xlim([-3,6])
+ # plt.ylim([-3,6])
+ # plt.axis('equal')
+ # plt.show()
+
+ def foot_detect(positions, thres):
+ velfactor, heightfactor = np.array([thres, thres]), np.array([3.0, 2.0])
+
+ feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2
+ feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2
+ feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2
+ # feet_l_h = positions[:-1,fid_l,1]
+ # feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(np.float)
+        feet_l = ((feet_l_x + feet_l_y + feet_l_z) < velfactor).astype(float)  # np.float was removed in NumPy >= 1.24
+
+ feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2
+ feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2
+ feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2
+ # feet_r_h = positions[:-1,fid_r,1]
+ # feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(np.float)
+        feet_r = ((feet_r_x + feet_r_y + feet_r_z) < velfactor).astype(float)
+ return feet_l, feet_r
+
+ #
+ feet_l, feet_r = foot_detect(positions, feet_thre)
+ # feet_l, feet_r = foot_detect(positions, 0.002)
+
+ '''Quaternion and Cartesian representation'''
+ r_rot = None
+
+ def get_rifke(positions):
+ '''Local pose'''
+ positions[..., 0] -= positions[:, 0:1, 0]
+ positions[..., 2] -= positions[:, 0:1, 2]
+ '''All pose face Z+'''
+ positions = qrot_np(np.repeat(r_rot[:, None], positions.shape[1], axis=1), positions)
+ return positions
+
+ def get_quaternion(positions):
+ skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
+ # (seq_len, joints_num, 4)
+ quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=False)
+
+ '''Fix Quaternion Discontinuity'''
+ quat_params = qfix(quat_params)
+ # (seq_len, 4)
+ r_rot = quat_params[:, 0].copy()
+ # print(r_rot[0])
+ '''Root Linear Velocity'''
+ # (seq_len - 1, 3)
+ velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
+ # print(r_rot.shape, velocity.shape)
+ velocity = qrot_np(r_rot[1:], velocity)
+ '''Root Angular Velocity'''
+ # (seq_len - 1, 4)
+ r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
+ quat_params[1:, 0] = r_velocity
+ # (seq_len, joints_num, 4)
+ return quat_params, r_velocity, velocity, r_rot
+
+ def get_cont6d_params(positions):
+ skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
+ # (seq_len, joints_num, 4)
+ quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=True)
+
+ '''Quaternion to continuous 6D'''
+ cont_6d_params = quaternion_to_cont6d_np(quat_params)
+ # (seq_len, 4)
+ r_rot = quat_params[:, 0].copy()
+ # print(r_rot[0])
+ '''Root Linear Velocity'''
+ # (seq_len - 1, 3)
+ velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
+ # print(r_rot.shape, velocity.shape)
+ velocity = qrot_np(r_rot[1:], velocity)
+ '''Root Angular Velocity'''
+ # (seq_len - 1, 4)
+ r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
+ # (seq_len, joints_num, 4)
+ return cont_6d_params, r_velocity, velocity, r_rot
+
+ cont_6d_params, r_velocity, velocity, r_rot = get_cont6d_params(positions)
+ positions = get_rifke(positions)
+
+ # trejec = np.cumsum(np.concatenate([np.array([[0, 0, 0]]), velocity], axis=0), axis=0)
+ # r_rotations, r_pos = recover_ric_glo_np(r_velocity, velocity[:, [0, 2]])
+
+ # plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*')
+ # plt.plot(ground_positions[:, 0, 0], ground_positions[:, 0, 2], marker='o', color='r')
+ # plt.plot(trejec[:, 0], trejec[:, 2], marker='^', color='g')
+ # plt.plot(r_pos[:, 0], r_pos[:, 2], marker='s', color='y')
+ # plt.xlabel('x')
+ # plt.ylabel('z')
+ # plt.axis('equal')
+ # plt.show()
+
+ '''Root height'''
+ root_y = positions[:, 0, 1:2]
+
+ '''Root rotation and linear velocity'''
+ # (seq_len-1, 1) rotation velocity along y-axis
+    # (seq_len-1, 2) linear velocity on xz plane
+ r_velocity = np.arcsin(r_velocity[:, 2:3])
+ l_velocity = velocity[:, [0, 2]]
+ # print(r_velocity.shape, l_velocity.shape, root_y.shape)
+ root_data = np.concatenate([r_velocity, l_velocity, root_y[:-1]], axis=-1)
+
+ '''Get Joint Rotation Representation'''
+    # (seq_len, (joints_num-1)*6) continuous 6D rotation for skeleton joints
+ rot_data = cont_6d_params[:, 1:].reshape(len(cont_6d_params), -1)
+
+    '''Get Joint Rotation Invariant Position Representation'''
+ # (seq_len, (joints_num-1)*3) local joint position
+ ric_data = positions[:, 1:].reshape(len(positions), -1)
+
+ '''Get Joint Velocity Representation'''
+ # (seq_len-1, joints_num*3)
+ local_vel = qrot_np(np.repeat(r_rot[:-1, None], global_positions.shape[1], axis=1),
+ global_positions[1:] - global_positions[:-1])
+ local_vel = local_vel.reshape(len(local_vel), -1)
+
+ data = root_data
+ data = np.concatenate([data, ric_data[:-1]], axis=-1)
+ data = np.concatenate([data, rot_data[:-1]], axis=-1)
+ # print(dataset.shape, local_vel.shape)
+ data = np.concatenate([data, local_vel], axis=-1)
+ data = np.concatenate([data, feet_l, feet_r], axis=-1)
+
+ return data
+
+
+def process_file(positions, feet_thre):
+ # (seq_len, joints_num, 3)
+ # '''Down Sample'''
+ # positions = positions[::ds_num]
+
+ '''Uniform Skeleton'''
+ # zeros tgt_offsets for testing
+ # tgt_offsets = torch.zeros([positions.shape[-2], 3])
+ # Test
+ data_dir = './dataset/000021.npy'
+ n_raw_offsets = torch.from_numpy(custom_raw_offsets)
+ kinematic_chain = custom_kinematic_chain
+
+ # Get offsets of target skeleton
+ example_data = np.load(data_dir) # os.path.join(data_dir, example_id + '.npy'))
+ example_data = example_data.reshape(len(example_data), -1, 3)
+ example_data = torch.from_numpy(example_data)
+ tgt_skel = Skeleton(n_raw_offsets, kinematic_chain, 'cpu')
+ # (joints_num, 3)
+ tgt_offsets = tgt_skel.get_offsets_joints(example_data[0])
+ # print(tgt_offsets)
+ positions = positions.detach().numpy()
+
+ positions = uniform_skeleton(positions, tgt_offsets, n_raw_offsets, kinematic_chain)
+
+ '''Put on Floor'''
+ floor_height = positions.min(axis=0).min(axis=0)[1]
+ positions[:, :, 1] -= floor_height
+ # print(floor_height)
+
+ # plot_3d_motion("./positions_1.mp4", kinematic_chain, positions, 'title', fps=20)
+
+ '''XZ at origin'''
+ root_pos_init = positions[0]
+ root_pose_init_xz = root_pos_init[0] * np.array([1, 0, 1])
+ positions = positions - root_pose_init_xz
+
+ # '''Move the first pose to origin '''
+ # root_pos_init = positions[0]
+ # positions = positions - root_pos_init[0]
+
+ '''All initially face Z+'''
+ r_hip, l_hip, sdr_r, sdr_l = face_joint_indx
+ across1 = root_pos_init[r_hip] - root_pos_init[l_hip]
+ across2 = root_pos_init[sdr_r] - root_pos_init[sdr_l]
+ across = across1 + across2
+ across = across / np.sqrt((across ** 2).sum(axis=-1))[..., np.newaxis]
+
+ # forward (3,), rotate around y-axis
+ forward_init = np.cross(np.array([[0, 1, 0]]), across, axis=-1)
+ # forward (3,)
+ forward_init = forward_init / np.sqrt((forward_init ** 2).sum(axis=-1))[..., np.newaxis]
+
+ # print(forward_init)
+
+ target = np.array([[0, 0, 1]])
+ root_quat_init = qbetween_np(forward_init, target)
+ root_quat_init = np.ones(positions.shape[:-1] + (4,)) * root_quat_init
+
+ positions_b = positions.copy()
+
+ positions = qrot_np(root_quat_init, positions)
+
+ # plot_3d_motion("./positions_2.mp4", kinematic_chain, positions, 'title', fps=20)
+
+ '''New ground truth positions'''
+ global_positions = positions.copy()
+
+ # plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*')
+ # plt.plot(positions[:, 0, 0], positions[:, 0, 2], marker='o', color='r')
+ # plt.xlabel('x')
+ # plt.ylabel('z')
+ # plt.axis('equal')
+ # plt.show()
+
+ """ Get Foot Contacts """
+
+ def foot_detect(positions, thres):
+ velfactor, heightfactor = np.array([thres, thres]), np.array([3.0, 2.0])
+
+ feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2
+ feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2
+ feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2
+ # feet_l_h = positions[:-1,fid_l,1]
+ # feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(np.float)
+        feet_l = ((feet_l_x + feet_l_y + feet_l_z) < velfactor).astype(float)
+
+ feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2
+ feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2
+ feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2
+ # feet_r_h = positions[:-1,fid_r,1]
+ # feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(np.float)
+        feet_r = ((feet_r_x + feet_r_y + feet_r_z) < velfactor).astype(float)
+ return feet_l, feet_r
+ #
+ feet_l, feet_r = foot_detect(positions, feet_thre)
+ # feet_l, feet_r = foot_detect(positions, 0.002)
+
+ '''Quaternion and Cartesian representation'''
+ r_rot = None
+
+ def get_rifke(positions):
+ '''Local pose'''
+ positions[..., 0] -= positions[:, 0:1, 0]
+ positions[..., 2] -= positions[:, 0:1, 2]
+ '''All pose face Z+'''
+ positions = qrot_np(np.repeat(r_rot[:, None], positions.shape[1], axis=1), positions)
+ return positions
+
+ def get_quaternion(positions):
+ skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
+ # (seq_len, joints_num, 4)
+ quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=False)
+
+ '''Fix Quaternion Discontinuity'''
+ quat_params = qfix(quat_params)
+ # (seq_len, 4)
+ r_rot = quat_params[:, 0].copy()
+ # print(r_rot[0])
+ '''Root Linear Velocity'''
+ # (seq_len - 1, 3)
+ velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
+ # print(r_rot.shape, velocity.shape)
+ velocity = qrot_np(r_rot[1:], velocity)
+ '''Root Angular Velocity'''
+ # (seq_len - 1, 4)
+ r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
+ quat_params[1:, 0] = r_velocity
+ # (seq_len, joints_num, 4)
+ return quat_params, r_velocity, velocity, r_rot
+
+ def get_cont6d_params(positions):
+ skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
+ # (seq_len, joints_num, 4)
+ quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=True)
+
+ '''Quaternion to continuous 6D'''
+ cont_6d_params = quaternion_to_cont6d_np(quat_params)
+ # (seq_len, 4)
+ r_rot = quat_params[:, 0].copy()
+ # print(r_rot[0])
+ '''Root Linear Velocity'''
+ # (seq_len - 1, 3)
+ velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
+ # print(r_rot.shape, velocity.shape)
+ velocity = qrot_np(r_rot[1:], velocity)
+ '''Root Angular Velocity'''
+ # (seq_len - 1, 4)
+ r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
+ # (seq_len, joints_num, 4)
+ return cont_6d_params, r_velocity, velocity, r_rot
+
+ cont_6d_params, r_velocity, velocity, r_rot = get_cont6d_params(positions)
+ positions = get_rifke(positions)
+
+ # trejec = np.cumsum(np.concatenate([np.array([[0, 0, 0]]), velocity], axis=0), axis=0)
+ # r_rotations, r_pos = recover_ric_glo_np(r_velocity, velocity[:, [0, 2]])
+
+ # plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*')
+ # plt.plot(ground_positions[:, 0, 0], ground_positions[:, 0, 2], marker='o', color='r')
+ # plt.plot(trejec[:, 0], trejec[:, 2], marker='^', color='g')
+ # plt.plot(r_pos[:, 0], r_pos[:, 2], marker='s', color='y')
+ # plt.xlabel('x')
+ # plt.ylabel('z')
+ # plt.axis('equal')
+ # plt.show()
+
+ '''Root height'''
+ root_y = positions[:, 0, 1:2]
+
+ '''Root rotation and linear velocity'''
+ # (seq_len-1, 1) rotation velocity along y-axis
+    # (seq_len-1, 2) linear velocity on xz plane
+ r_velocity = np.arcsin(r_velocity[:, 2:3])
+ l_velocity = velocity[:, [0, 2]]
+ # print(r_velocity.shape, l_velocity.shape, root_y.shape)
+ root_data = np.concatenate([r_velocity, l_velocity, root_y[:-1]], axis=-1)
+
+ '''Get Joint Rotation Representation'''
+    # (seq_len, (joints_num-1)*6) continuous 6D rotation for skeleton joints
+ rot_data = cont_6d_params[:, 1:].reshape(len(cont_6d_params), -1)
+
+    '''Get Joint Rotation Invariant Position Representation'''
+ # (seq_len, (joints_num-1)*3) local joint position
+ ric_data = positions[:, 1:].reshape(len(positions), -1)
+
+ '''Get Joint Velocity Representation'''
+ # (seq_len-1, joints_num*3)
+ local_vel = qrot_np(np.repeat(r_rot[:-1, None], global_positions.shape[1], axis=1),
+ global_positions[1:] - global_positions[:-1])
+ local_vel = local_vel.reshape(len(local_vel), -1)
+
+ data = root_data
+ data = np.concatenate([data, ric_data[:-1]], axis=-1)
+ data = np.concatenate([data, rot_data[:-1]], axis=-1)
+ # print(dataset.shape, local_vel.shape)
+ data = np.concatenate([data, local_vel], axis=-1)
+ data = np.concatenate([data, feet_l, feet_r], axis=-1)
+
+ return data, global_positions, positions, l_velocity
+
+
+# Recover global angle and positions for rotation dataset
+# root_rot_velocity (B, seq_len, 1)
+# root_linear_velocity (B, seq_len, 2)
+# root_y (B, seq_len, 1)
+# ric_data (B, seq_len, (joints_num - 1)*3)
+# rot_data (B, seq_len, (joints_num - 1)*6)
+# local_velocity (B, seq_len, joints_num*3)
+# foot contact (B, seq_len, 4)
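+# For reference, this layout gives 4 + (joints_num-1)*3 + (joints_num-1)*6
+# + joints_num*3 + 4 = 12*joints_num - 1 features per frame: 263 for the
+# 22-joint HumanML3D skeleton, 251 for KIT (21 joints), and 323 for the
+# 27-joint rig defined at the top of this file.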
+def recover_root_rot_pos(data, abs_3d=False, return_rot_ang=False):
+ """
+    data: feature tensor whose first four channels are the root channels
+    (rotation, x, z, y). With abs_3d=True the rotation and x/z channels are
+    absolute values; otherwise they are per-frame velocities that get cumulatively summed.
+ """
+ if abs_3d:
+        '''Y-axis rotation is absolute (already summed)'''
+ r_rot_ang = data[..., 0]
+ else:
+ rot_vel = data[..., 0]
+ r_rot_ang = torch.zeros_like(rot_vel).to(data.device)
+ '''Get Y-axis rotation from rotation velocity'''
+ r_rot_ang[..., 1:] = rot_vel[..., :-1]
+ r_rot_ang = torch.cumsum(r_rot_ang, dim=-1)
+
+ r_rot_quat = torch.zeros(data.shape[:-1] + (4,)).to(data.device)
+ r_rot_quat[..., 0] = torch.cos(r_rot_ang)
+ r_rot_quat[..., 2] = torch.sin(r_rot_ang)
+
+ r_pos = torch.zeros(data.shape[:-1] + (3,)).to(data.device)
+
+ if abs_3d:
+        '''r_pos is absolute, does not depend on the Y-axis rotation, and is already summed'''
+ # (x,z) [0,2] <= (x,z) [1,2]
+ r_pos[..., :, [0, 2]] = data[..., :, 1:3]
+ else:
+ '''Add Y-axis rotation to root position'''
+ # (x,z) [0,2] <= (x,z) [1,2]
+ # adding zero at 0 index
+ # data [+1, -2, -3, +5, xx]
+ # r_pose [0, +1, -2, -3, +5]
+        # r_pos[..., 1:, [0, 2]] = data[..., :-1, 1:3]
+ r_pos[..., 1:, [0, 2]] = data[..., :-1, 1:3].float()
+ r_pos = qrot(qinv(r_rot_quat), r_pos)
+ r_pos = torch.cumsum(r_pos, dim=-2)
+
+ r_pos[..., 1] = data[..., 3]
+ if return_rot_ang:
+ return r_rot_quat, r_pos, r_rot_ang
+ return r_rot_quat, r_pos
+
+
+def recover_from_rot(data, joints_num, skeleton, abs_3d=False):
+ r_rot_quat, r_pos = recover_root_rot_pos(data, abs_3d=abs_3d)
+
+ r_rot_cont6d = quaternion_to_cont6d(r_rot_quat)
+
+ start_indx = 1 + 2 + 1 + (joints_num - 1) * 3
+ end_indx = start_indx + (joints_num - 1) * 6
+ cont6d_params = data[..., start_indx:end_indx]
+ # print(r_rot_cont6d.shape, cont6d_params.shape, r_pos.shape)
+ cont6d_params = torch.cat([r_rot_cont6d, cont6d_params], dim=-1)
+ cont6d_params = cont6d_params.view(-1, joints_num, 6)
+
+ positions = skeleton.forward_kinematics_cont6d(cont6d_params, r_pos)
+
+ return positions
+
+def recover_rot(data):
+ # dataset [bs, seqlen, 263/251] HumanML/KIT
+ joints_num = 22 if data.shape[-1] == 263 else 21
+ r_rot_quat, r_pos = recover_root_rot_pos(data)
+ r_pos_pad = torch.cat([r_pos, torch.zeros_like(r_pos)], dim=-1).unsqueeze(-2)
+ r_rot_cont6d = quaternion_to_cont6d(r_rot_quat)
+ start_indx = 1 + 2 + 1 + (joints_num - 1) * 3
+ end_indx = start_indx + (joints_num - 1) * 6
+ cont6d_params = data[..., start_indx:end_indx]
+ cont6d_params = torch.cat([r_rot_cont6d, cont6d_params], dim=-1)
+ cont6d_params = cont6d_params.view(-1, joints_num, 6)
+ cont6d_params = torch.cat([cont6d_params, r_pos_pad], dim=-2)
+ return cont6d_params
+
+
+def recover_from_ric(data, joints_num, abs_3d=False):
+ r_rot_quat, r_pos = recover_root_rot_pos(data, abs_3d=abs_3d)
+ positions = data[..., 4:(joints_num - 1) * 3 + 4]
+ positions = positions.view(positions.shape[:-1] + (-1, 3))
+
+ '''Add Y-axis rotation to local joints'''
+ positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions)
+
+ '''Add root XZ to joints'''
+ positions[..., 0] += r_pos[..., 0:1]
+ positions[..., 2] += r_pos[..., 2:3]
+
+ '''Concate root and joints'''
+ positions = torch.cat([r_pos.unsqueeze(-2), positions], dim=-2)
+
+ return positions
+'''
+For Text2Motion Dataset
+'''
+'''
+if __name__ == "__main__":
+ example_id = "000021"
+ # Lower legs
+ l_idx1, l_idx2 = 5, 8
+ # Right/Left foot
+ fid_r, fid_l = [8, 11], [7, 10]
+ # Face direction, r_hip, l_hip, sdr_r, sdr_l
+ face_joint_indx = [2, 1, 17, 16]
+ # l_hip, r_hip
+ r_hip, l_hip = 2, 1
+ joints_num = 22
+ # ds_num = 8
+ data_dir = '../dataset/pose_data_raw/joints/'
+ save_dir1 = '../dataset/pose_data_raw/new_joints/'
+ save_dir2 = '../dataset/pose_data_raw/new_joint_vecs/'
+
+ n_raw_offsets = torch.from_numpy(t2m_raw_offsets)
+ kinematic_chain = t2m_kinematic_chain
+
+ # Get offsets of target skeleton
+ example_data = np.load(os.path.join(data_dir, example_id + '.npy'))
+ example_data = example_data.reshape(len(example_data), -1, 3)
+ example_data = torch.from_numpy(example_data)
+ tgt_skel = Skeleton(n_raw_offsets, kinematic_chain, 'cpu')
+ # (joints_num, 3)
+ tgt_offsets = tgt_skel.get_offsets_joints(example_data[0])
+ # print(tgt_offsets)
+
+ source_list = os.listdir(data_dir)
+ frame_num = 0
+ for source_file in tqdm(source_list):
+ source_data = np.load(os.path.join(data_dir, source_file))[:, :joints_num]
+ try:
+ dataset, ground_positions, positions, l_velocity = process_file(source_data, 0.002)
+ rec_ric_data = recover_from_ric(torch.from_numpy(dataset).unsqueeze(0).float(), joints_num)
+ np.save(pjoin(save_dir1, source_file), rec_ric_data.squeeze().numpy())
+ np.save(pjoin(save_dir2, source_file), dataset)
+ frame_num += dataset.shape[0]
+ except Exception as e:
+ print(source_file)
+ print(e)
+
+ print('Total clips: %d, Frames: %d, Duration: %fm' %
+ (len(source_list), frame_num, frame_num / 20 / 60))
+'''
+
+if __name__ == "__main__":
+ example_id = "03950_gt"
+ # Lower legs
+ l_idx1, l_idx2 = 17, 18
+ # Right/Left foot
+ fid_r, fid_l = [14, 15], [19, 20]
+ # Face direction, r_hip, l_hip, sdr_r, sdr_l
+ face_joint_indx = [11, 16, 5, 8]
+ # l_hip, r_hip
+ r_hip, l_hip = 11, 16
+ joints_num = 21
+ # ds_num = 8
+ data_dir = '../dataset/kit_mocap_dataset/joints/'
+ save_dir1 = '../dataset/kit_mocap_dataset/new_joints/'
+ save_dir2 = '../dataset/kit_mocap_dataset/new_joint_vecs/'
+
+ n_raw_offsets = torch.from_numpy(kit_raw_offsets)
+ kinematic_chain = kit_kinematic_chain
+
+ '''Get offsets of target skeleton'''
+ example_data = np.load(os.path.join(data_dir, example_id + '.npy'))
+ example_data = example_data.reshape(len(example_data), -1, 3)
+ example_data = torch.from_numpy(example_data)
+ tgt_skel = Skeleton(n_raw_offsets, kinematic_chain, 'cpu')
+ # (joints_num, 3)
+ tgt_offsets = tgt_skel.get_offsets_joints(example_data[0])
+ # print(tgt_offsets)
+
+ source_list = os.listdir(data_dir)
+ frame_num = 0
+ '''Read source dataset'''
+ for source_file in tqdm(source_list):
+ source_data = np.load(os.path.join(data_dir, source_file))[:, :joints_num]
+ try:
+ name = ''.join(source_file[:-7].split('_')) + '.npy'
+ data, ground_positions, positions, l_velocity = process_file(source_data, 0.05)
+ rec_ric_data = recover_from_ric(torch.from_numpy(data).unsqueeze(0).float(), joints_num)
+ if np.isnan(rec_ric_data.numpy()).any():
+ print(source_file)
+ continue
+ np.save(pjoin(save_dir1, name), rec_ric_data.squeeze().numpy())
+ np.save(pjoin(save_dir2, name), data)
+ frame_num += data.shape[0]
+ except Exception as e:
+ print(source_file)
+ print(e)
+
+ print('Total clips: %d, Frames: %d, Duration: %fm' %
+ (len(source_list), frame_num, frame_num / 12.5 / 60))
diff --git a/data_loaders/custom/utils/get_opt.py b/data_loaders/custom/utils/get_opt.py
new file mode 100644
index 0000000..186f5f3
--- /dev/null
+++ b/data_loaders/custom/utils/get_opt.py
@@ -0,0 +1,82 @@
+import os
+from argparse import Namespace
+import re
+from os.path import join as pjoin
+from data_loaders.custom.utils.word_vectorizer import POS_enumerator
+
+
+def is_float(numStr):
+ flag = False
+    numStr = str(numStr).strip().lstrip('-').lstrip('+')  # strip any leading '+' or '-' sign
+ try:
+ reg = re.compile(r'^[-+]?[0-9]+\.[0-9]+$')
+ res = reg.match(str(numStr))
+ if res:
+ flag = True
+ except Exception as ex:
+ print("is_float() - error: " + str(ex))
+ return flag
+
+
+def is_number(numStr):
+ flag = False
+    numStr = str(numStr).strip().lstrip('-').lstrip('+')  # strip any leading '+' or '-' sign
+ if str(numStr).isdigit():
+ flag = True
+ return flag
+
+
+def get_opt(opt_path, device, mode, max_motion_length, use_abs3d=False):
+ opt = Namespace()
+ opt_dict = vars(opt)
+
+ skip = ('-------------- End ----------------',
+ '------------ Options -------------',
+ '\n')
+ print('Reading', opt_path)
+ with open(opt_path) as f:
+ for line in f:
+ if line.strip() not in skip:
+ # print(line.strip())
+ key, value = line.strip().split(': ')
+ if value in ('True', 'False'):
+                    opt_dict[key] = (value == 'True')  # note: bool('False') would evaluate to True
+ elif is_float(value):
+ opt_dict[key] = float(value)
+ elif is_number(value):
+ opt_dict[key] = int(value)
+ else:
+ opt_dict[key] = str(value)
+
+ # print(opt)
+ opt_dict['which_epoch'] = 'latest'
+ opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)
+ opt.model_dir = pjoin(opt.save_root, 'model')
+ opt.meta_dir = pjoin(opt.save_root, 'meta')
+
+ if opt.dataset_name == 't2m':
+ opt.data_root = './dataset/Custom' ## FIXME: Make sure this matches class name
+ data_dir = 'new_joint_vecs'
+ text_dir = 'texts'
+ opt.motion_dir = pjoin(opt.data_root, data_dir)
+ opt.text_dir = pjoin(opt.data_root, text_dir)
+
+
+ ## Compute dim_pose based on joints_num supplied in humanml_opt.txt
+ opt.dim_pose = 12 * opt.joints_num - 1
+        # NOTE: the UNet requires max_motion_length to be a multiple of 16
+ opt.max_motion_length = max_motion_length
+ print(f'WARNING: max_motion_length is set to {max_motion_length}')
+ elif opt.dataset_name == 'kit':
+ raise NotImplementedError()
+ else:
+ raise KeyError('Dataset not recognized')
+
+ opt.dim_word = 300
+ opt.num_classes = 200 // opt.unit_length
+ opt.dim_pos_ohot = len(POS_enumerator)
+ opt.is_train = False
+ opt.is_continue = False
+ opt.device = device
+
+ return opt
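+
+# Usage sketch (the opt file path below is a placeholder, not fixed by this repo):
+#   opt = get_opt('./dataset/humanml_opt.txt', device='cuda:0', mode='eval',
+#                 max_motion_length=196)
+#   assert opt.dim_pose == 12 * opt.joints_num - 1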
diff --git a/data_loaders/custom/utils/metrics.py b/data_loaders/custom/utils/metrics.py
new file mode 100644
index 0000000..291bbd6
--- /dev/null
+++ b/data_loaders/custom/utils/metrics.py
@@ -0,0 +1,253 @@
+import numpy as np
+from scipy import linalg
+from scipy.ndimage import uniform_filter1d
+import torch
+
+
+# (X - X_train)*(X - X_train) = -2X*X_train + X*X + X_train*X_train
+def euclidean_distance_matrix(matrix1, matrix2):
+ """
+ Params:
+ -- matrix1: N1 x D
+ -- matrix2: N2 x D
+ Returns:
+ -- dist: N1 x N2
+ dist[i, j] == distance(matrix1[i], matrix2[j])
+ """
+ assert matrix1.shape[1] == matrix2.shape[1]
+ d1 = -2 * np.dot(matrix1, matrix2.T) # shape (num_test, num_train)
+ d2 = np.sum(np.square(matrix1), axis=1, keepdims=True) # shape (num_test, 1)
+ d3 = np.sum(np.square(matrix2), axis=1) # shape (num_train, )
+ dists = np.sqrt(d1 + d2 + d3) # broadcasting
+ return dists
+
+def calculate_top_k(mat, top_k):
+ size = mat.shape[0]
+ gt_mat = np.expand_dims(np.arange(size), 1).repeat(size, 1)
+ bool_mat = (mat == gt_mat)
+ correct_vec = False
+ top_k_list = []
+ for i in range(top_k):
+# print(correct_vec, bool_mat[:, i])
+ correct_vec = (correct_vec | bool_mat[:, i])
+ # print(correct_vec)
+ top_k_list.append(correct_vec[:, None])
+ top_k_mat = np.concatenate(top_k_list, axis=1)
+ return top_k_mat
+
+
+def calculate_R_precision(embedding1, embedding2, top_k, sum_all=False):
+ dist_mat = euclidean_distance_matrix(embedding1, embedding2)
+ argmax = np.argsort(dist_mat, axis=1)
+ top_k_mat = calculate_top_k(argmax, top_k)
+ if sum_all:
+ return top_k_mat.sum(axis=0)
+ else:
+ return top_k_mat
+
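+# Example sketch: for N matched (text, motion) embedding pairs, R-precision@k is
+# the fraction of rows whose ground-truth match lands in the k nearest neighbours:
+#   top_k_counts = calculate_R_precision(text_emb, motion_emb, top_k=3, sum_all=True)
+#   r_precision_at_k = top_k_counts / text_emb.shape[0]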
+
+def calculate_matching_score(embedding1, embedding2, sum_all=False):
+ assert len(embedding1.shape) == 2
+ assert embedding1.shape[0] == embedding2.shape[0]
+ assert embedding1.shape[1] == embedding2.shape[1]
+
+ dist = linalg.norm(embedding1 - embedding2, axis=1)
+ if sum_all:
+ return dist.sum(axis=0)
+ else:
+ return dist
+
+
+
+def calculate_activation_statistics(activations):
+ """
+ Params:
+ -- activation: num_samples x dim_feat
+ Returns:
+ -- mu: dim_feat
+ -- sigma: dim_feat x dim_feat
+ """
+ mu = np.mean(activations, axis=0)
+ cov = np.cov(activations, rowvar=False)
+ return mu, cov
+
+
+def calculate_diversity(activation, diversity_times):
+ assert len(activation.shape) == 2
+ assert activation.shape[0] > diversity_times
+ num_samples = activation.shape[0]
+
+ first_indices = np.random.choice(num_samples, diversity_times, replace=False)
+ second_indices = np.random.choice(num_samples, diversity_times, replace=False)
+ dist = linalg.norm(activation[first_indices] - activation[second_indices], axis=1)
+ return dist.mean()
+
+
+def calculate_multimodality(activation, multimodality_times):
+ assert len(activation.shape) == 3
+ assert activation.shape[1] > multimodality_times
+ num_per_sent = activation.shape[1]
+
+ first_dices = np.random.choice(num_per_sent, multimodality_times, replace=False)
+ second_dices = np.random.choice(num_per_sent, multimodality_times, replace=False)
+ dist = linalg.norm(activation[:, first_dices] - activation[:, second_dices], axis=2)
+ return dist.mean()
+
+
+def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
+ """Numpy implementation of the Frechet Distance.
+ The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
+ and X_2 ~ N(mu_2, C_2) is
+ d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
+ Stable version by Dougal J. Sutherland.
+ Params:
+ -- mu1 : Numpy array containing the activations of a layer of the
+ inception net (like returned by the function 'get_predictions')
+ for generated samples.
+ -- mu2 : The sample mean over activations, precalculated on an
+ representative dataset set.
+ -- sigma1: The covariance matrix over activations for generated samples.
+ -- sigma2: The covariance matrix over activations, precalculated on an
+ representative dataset set.
+ Returns:
+ -- : The Frechet Distance.
+ """
+
+ mu1 = np.atleast_1d(mu1)
+ mu2 = np.atleast_1d(mu2)
+
+ sigma1 = np.atleast_2d(sigma1)
+ sigma2 = np.atleast_2d(sigma2)
+
+ assert mu1.shape == mu2.shape, \
+ 'Training and test mean vectors have different lengths'
+ assert sigma1.shape == sigma2.shape, \
+ 'Training and test covariances have different dimensions'
+
+ diff = mu1 - mu2
+
+ # Product might be almost singular
+ covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
+ if not np.isfinite(covmean).all():
+ msg = ('fid calculation produces singular product; '
+ 'adding %s to diagonal of cov estimates') % eps
+ print(msg)
+ offset = np.eye(sigma1.shape[0]) * eps
+ covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
+
+ # Numerical error might give slight imaginary component
+ if np.iscomplexobj(covmean):
+ if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
+ m = np.max(np.abs(covmean.imag))
+ raise ValueError('Imaginary component {}'.format(m))
+ covmean = covmean.real
+
+ tr_covmean = np.trace(covmean)
+
+ return (diff.dot(diff) + np.trace(sigma1) +
+ np.trace(sigma2) - 2 * tr_covmean)
+
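+# Typical use (a sketch, with embedding arrays assumed): FID between ground-truth
+# and generated motion embeddings.
+#   mu_gt, cov_gt = calculate_activation_statistics(gt_embeddings)
+#   mu_gen, cov_gen = calculate_activation_statistics(gen_embeddings)
+#   fid = calculate_frechet_distance(mu_gt, cov_gt, mu_gen, cov_gen)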
+
+def calculate_keyframe_error(keyframe_error, num_keyframes):
+ batch_size = keyframe_error.shape[0]
+ mean_err_keyframe = torch.tensor([keyframe_error[i, :num_keyframes[i]].mean() for i in range(batch_size)])
+ return mean_err_keyframe.mean()
+
+
+def calculate_trajectory_error(dist_error, num_keyframes, strict=True):
+    ''' dist_error: [batch_size, num_keyframes] error for each keyframe in metres.
+    Two thresholds: 20 cm and 50 cm.
+    With strict=True a trajectory fails if any keyframe exceeds the threshold;
+    otherwise it fails if the mean keyframe error does.
+    return: traj_fail(0.2), traj_fail(0.5), all_kps_fail(0.2), all_kps_fail(0.5), all_mean_err.
+    All returned metrics are already averaged over the batch.
+ '''
+ # mean_err_traj = dist_error.mean(1)
+ batch_size = dist_error.shape[0]
+ mean_err_traj = torch.tensor([dist_error[i, :num_keyframes[i]].mean() for i in range(batch_size)])
+ if strict:
+ # Traj fails if any of the key frame fails
+ traj_fail_02 = 1.0 - (dist_error <= 0.2).all(1).sum() / dist_error.shape[0]
+ traj_fail_05 = 1.0 - (dist_error <= 0.5).all(1).sum() / dist_error.shape[0]
+ else:
+ # Traj fails if the mean error of all keyframes more than the threshold
+ traj_fail_02 = (mean_err_traj > 0.2).sum() / dist_error.shape[0]
+ traj_fail_05 = (mean_err_traj > 0.5).sum() / dist_error.shape[0]
+ all_fail_02 = (dist_error > 0.2).sum() / (dist_error >= 0).sum()
+ all_fail_05 = (dist_error > 0.5).sum() / (dist_error >= 0).sum()
+
+ # out = {"traj_fail_02": traj_fail_02,
+ # "traj_fail_05": traj_fail_05,
+ # "all_fail_02": all_fail_02,
+ # "all_fail_05": all_fail_05,
+ # "all_mean_err": dist_error.mean()}
+ return np.array([traj_fail_02, traj_fail_05, all_fail_02, all_fail_05, mean_err_traj.mean()])
+
+
+def calculate_trajectory_diversity(trajectories, lengths):
+    ''' Standard deviation of point locations in the trajectories
+ Args:
+ trajectories: [bs, rep, 196, 2]
+ lengths: [bs]
+ '''
+ # [32, 2, 196, 2 (xz)]
+ # mean_trajs = trajectories.mean(1, keepdims=True)
+ # dist_to_mean = np.linalg.norm(trajectories - mean_trajs, axis=3)
+ def traj_div(traj, length):
+ # traj [rep, 196, 2]
+ # length (int)
+ traj = traj[:, :length, :]
+ # point_var = traj.var(axis=0, keepdims=True).mean()
+ # point_var = np.sqrt(point_var)
+ # return point_var
+
+ mean_traj = traj.mean(axis=0, keepdims=True)
+ dist = np.sqrt(((traj - mean_traj)**2).sum(axis=2))
+ rms_dist = np.sqrt((dist**2).mean())
+ return rms_dist
+
+ div = []
+ for i in range(len(trajectories)):
+ div.append(traj_div(trajectories[i], lengths[i]))
+ return np.array(div).mean()
+
+def calculate_skating_ratio(motions):
+ thresh_height = 0.05 # 10
+ fps = 20.0
+ thresh_vel = 0.50 # 20 cm /s
+ avg_window = 5 # frames
+
+ batch_size = motions.shape[0]
+ # 10 left, 11 right foot. XZ plane, y up
+ # motions [bs, 22, 3, max_len]
+ verts_feet = motions[:, [10, 11], :, :].detach().cpu().numpy() # [bs, 2, 3, max_len]
+ verts_feet_plane_vel = np.linalg.norm(verts_feet[:, :, [0, 2], 1:] - verts_feet[:, :, [0, 2], :-1], axis=2) * fps # [bs, 2, max_len-1]
+ # [bs, 2, max_len-1]
+ vel_avg = uniform_filter1d(verts_feet_plane_vel, axis=-1, size=avg_window, mode='constant', origin=0)
+
+ verts_feet_height = verts_feet[:, :, 1, :] # [bs, 2, max_len]
+    # If feet touch the ground in adjacent frames
+ feet_contact = np.logical_and((verts_feet_height[:, :, :-1] < thresh_height), (verts_feet_height[:, :, 1:] < thresh_height)) # [bs, 2, max_len - 1]
+ # skate velocity
+ skate_vel = feet_contact * vel_avg
+
+    # a foot counts as skating if it is in contact and moving in the current frame
+    skating = np.logical_and(feet_contact, (verts_feet_plane_vel > thresh_vel))
+    # and also moving over the averaging window
+    skating = np.logical_and(skating, (vel_avg > thresh_vel))
+
+    # A frame counts as skating if either foot slides
+ skating = np.logical_or(skating[:, 0, :], skating[:, 1, :]) # [bs, max_len -1]
+ skating_ratio = np.sum(skating, axis=1) / skating.shape[1]
+
+ return skating_ratio, skate_vel
+
+ # verts_feet_gt = markers_got[:, [16, 47], :].detach().cpu().numpy() # [119, 2, 3] heels
+ # verts_feet_horizon_vel_gt = np.linalg.norm(verts_feet_gt[1:, :, :-1] - verts_feet_gt[:-1, :, :-1], axis=-1) * 30
+
+ # verts_feet_height_gt = verts_feet_gt[:, :, -1][0:-1] # [118,2]
+ # min_z = markers_gt[:, :, 2].min().detach().cpu().numpy()
+ # verts_feet_height_gt = verts_feet_height_gt - min_z
+
+ # skating_gt = (verts_feet_horizon_vel_gt > thresh_vel) * (verts_feet_height_gt < thresh_height)
+ # skating_gt = np.sum(np.logival_and(skating_gt[:, 0], skating_gt[:, 1])) / 118
+ # skating_gt_list.append(skating_gt)
diff --git a/data_loaders/custom/utils/paramUtil.py b/data_loaders/custom/utils/paramUtil.py
new file mode 100644
index 0000000..bcd0014
--- /dev/null
+++ b/data_loaders/custom/utils/paramUtil.py
@@ -0,0 +1,71 @@
+"""
+Parameters for the reference skeleton.
+
+Each skeleton must have:
+- kinematic chain: list of lists that reflect joint hierarchy
+- raw offsets: np.array of relative positions to parent node in [x, y, z] order
+- tgt_skel_id: serial number of the file to read the example skeleton from
+"""
+
+import numpy as np
+
+# Define a kinematic tree for the skeletal structure
+t2m_raw_offsets = np.array([[0,0,0],
+ [1,0,0],
+ [-1,0,0],
+ [0,1,0],
+ [0,-1,0],
+ [0,-1,0],
+ [0,1,0],
+ [0,-1,0],
+ [0,-1,0],
+ [0,1,0],
+ [0,0,1],
+ [0,0,1],
+ [0,1,0],
+ [1,0,0],
+ [-1,0,0],
+ [0,0,1],
+ [0,-1,0],
+ [0,-1,0],
+ [0,-1,0],
+ [0,-1,0],
+ [0,-1,0],
+ [0,-1,0]])
+
+t2m_kinematic_chain = [[0, 2, 5, 8, 11], [0, 1, 4, 7, 10], [0, 3, 6, 9, 12, 15], [9, 14, 17, 19, 21], [9, 13, 16, 18, 20]]
+
+t2m_tgt_skel_id = '000021'
+
+custom_kinematic_chain = [[0, 1, 2, 3, 4, 5, 6], [1, 7, 8, 9, 10, 11], [1, 12, 13, 14, 15, 16], [13, 17, 18, 19, 20, 21], [13, 22, 23, 24, 25, 26]]
+custom_raw_offsets = np.array(
+ [
+ [ 0, 0, 0],
+ [ 0, 1, 0],
+ [ 1, 0, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [-1, 0, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [ 0, 1, 0],
+ [ 0, 1, 0],
+ [ 0, 1, 0],
+ [ 0, 1, 0],
+ [ 0, 1, 0],
+ [-1, 0, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [ 1, 0, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0],
+ [ 0,-1, 0]
+ ]
+)
\ No newline at end of file
diff --git a/data_loaders/custom/utils/plot_script.py b/data_loaders/custom/utils/plot_script.py
new file mode 100644
index 0000000..1e225c1
--- /dev/null
+++ b/data_loaders/custom/utils/plot_script.py
@@ -0,0 +1,247 @@
+import math
+import numpy as np
+import matplotlib
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+from matplotlib.patches import Circle
+import mpl_toolkits.mplot3d.art3d as art3d
+from matplotlib.animation import FuncAnimation, FFMpegFileWriter
+from mpl_toolkits.mplot3d.art3d import Poly3DCollection
+import mpl_toolkits.mplot3d.axes3d as p3
+# import cv2
+from textwrap import wrap
+
+
+def list_cut_average(ll, intervals):
+ if intervals == 1:
+ return ll
+
+ bins = math.ceil(len(ll) * 1.0 / intervals)
+ ll_new = []
+ for i in range(bins):
+ l_low = intervals * i
+ l_high = l_low + intervals
+ l_high = l_high if l_high < len(ll) else len(ll)
+ ll_new.append(np.mean(ll[l_low:l_high]))
+ return ll_new
+
+
+def test_plot_circle():
+ # matplotlib.use('Agg')
+ fig = plt.figure(figsize=(3, 3))
+ plt.tight_layout()
+ # ax = p3.Axes3D(fig)
+ ax = fig.add_subplot(111, projection="3d")
+
+ x_c = 1
+ y_c = 0.1
+ z_c = 1
+ r = 2
+
+ theta = np.linspace(0, 2 * np.pi, 300) # 300 points on the circle
+ x = x_c + r * np.sin(theta)
+ y = y_c + theta * 0.0
+ z = z_c + r * np.cos(theta)
+ import pdb; pdb.set_trace()
+ ax.plot3D(x, y, z, color="red")
+ plt.show()
+
+ return
+
+
+def plot_3d_motion(save_path, kinematic_tree, joints, title, dataset, figsize=(3, 3), fps=120, radius=3,
+ vis_mode='default', gt_frames=[], traj_only=False, target_pose=None, kframes=[], obs_list=[]):
+ matplotlib.use('Agg')
+
+ title = '\n'.join(wrap(title, 20))
+
+ def init():
+ ax.set_xlim3d([-radius / 2, radius / 2])
+ ax.set_ylim3d([0, radius])
+ ax.set_zlim3d([-radius / 3., radius * 2 / 3.])
+ # print(title)
+ fig.suptitle(title, fontsize=10)
+ ax.grid(b=False)
+
+ def plot_xzPlane(minx, maxx, miny, minz, maxz):
+ ## Plot a plane XZ
+ verts = [
+ [minx, miny, minz],
+ [minx, miny, maxz],
+ [maxx, miny, maxz],
+ [maxx, miny, minz]
+ ]
+ xz_plane = Poly3DCollection([verts])
+ xz_plane.set_facecolor((0.5, 0.5, 0.5, 0.5))
+ ax.add_collection3d(xz_plane)
+
+ def plot_trajectory(trajec_idx):
+ ax.plot3D([0 - trajec_idx[0], 0 - trajec_idx[0]], [0.2, 0.2], [0 - trajec_idx[1], 1 - trajec_idx[1]], color="red") # (x,y,z)
+
+ def plot_ref_axes(trajec_idx):
+ '''
+ trajec_idx contains (x,z) coordinate of the root of the current frame.
+ Need to offset the reference axes because the plot is root-centered
+ '''
+ ax.plot3D([0 - trajec_idx[0], 0 - trajec_idx[0]], [0.2, 0.2], [0 - trajec_idx[1], 1 - trajec_idx[1]], color="red") # (x,y,z)
+ ax.plot3D([0 - trajec_idx[0], 1 - trajec_idx[0]], [0.2, 0.2], [0 - trajec_idx[1], 0 - trajec_idx[1]], color="yellow") # (x,y,z)
+
+ def plot_ground_target(trajec_idx):
+ # kframes = [(30, (0.0, 3.0)),
+ # (45, (1.5, 3.0)),
+ # (60, (3.0, 3.0)),
+ # (75, (3.0, 1.5)),
+ # (90, (3.0, 0.0)),
+ # (105, (1.5, 0.0)),
+ # (120, (0.0, 0.0))
+ # ]
+ pp = [(bb[0] * 1.3, bb[1] * 1.3) for (aa, bb) in kframes]
+ for i in range(len(pp)):
+ ax.plot3D([pp[i][0] - trajec_idx[0], pp[i][0] - trajec_idx[0]], [0.0, 0.1], [pp[i][1] - trajec_idx[1], pp[i][1] - trajec_idx[1]], color="blue") # (x,y,z)
+
+ def plot_obstacles(trajec_idx):
+ for i in range(len(obs_scale)):
+ x_c = obs_scale[i][0][0] - trajec_idx[0]
+ y_c = 0.1
+ z_c = obs_scale[i][0][1] - trajec_idx[1]
+ r = obs_scale[i][1]
+ # Draw circle
+ theta = np.linspace(0, 2 * np.pi, 300) # 300 points on the circle
+ x = x_c + r * np.sin(theta)
+ y = y_c + theta * 0.0
+ z = z_c + r * np.cos(theta)
+ ax.plot3D(x, y, z, color="red") # linewidth=2.0
+
+ def plot_target_pose(target_pose, frame_idx, cur_root_loc, used_colors, kinematic_tree):
+ # The target pose is re-centered in every frame because the plot is root-centered
+ # used_colors = colors_blue if index in gt_frames else colors
+ for target_frame in frame_idx:
+ for i, (chain, color) in enumerate(zip(kinematic_tree, used_colors)):
+ if i < 5:
+ linewidth = 4.0
+ else:
+ linewidth = 2.0
+ # print("i = ", i, data[index, chain, 0], data[index, chain, 1], data[index, chain, 2])
+ ax.plot3D(target_pose[target_frame, chain, 0] - cur_root_loc[0],
+ target_pose[target_frame, chain, 1],
+ target_pose[target_frame, chain, 2] - cur_root_loc[2],
+ linewidth=linewidth, color=color)
+
+
+ # (seq_len, joints_num, 3)
+ data = joints.copy().reshape(len(joints), -1, 3)
+ if target_pose is None:
+ target_pose = np.zeros_like(data)
+
+ # preparation related to specific datasets
+ if dataset == 'kit':
+ data *= 0.003 # scale for visualization
+ target_pose *= 0.003
+ elif dataset == 'humanml':
+ data *= 1.3 # scale for visualization
+ target_pose *= 1.3
+ obs_scale = [((loc[0] * 1.3, loc[1] * 1.3), rr * 1.3) for (loc, rr) in obs_list]
+ elif dataset in ['humanact12', 'uestc']:
+ data *= -1.5 # reverse axes, scale for visualization
+ target_pose *= -1.5
+
+ fig = plt.figure(figsize=figsize)
+ plt.tight_layout()
+ # ax = p3.Axes3D(fig)
+ ax = fig.add_subplot(111, projection="3d")
+ init()
+ MINS = data.min(axis=0).min(axis=0)
+ MAXS = data.max(axis=0).max(axis=0)
+ colors_blue = ["#4D84AA", "#5B9965", "#61CEB9", "#34C1E2", "#80B79A"] # GT color
+ colors_orange = ["#DD5A37", "#D69E00", "#B75A39", "#FF6D00", "#DDB50E"] # Generation color
+ colors = colors_orange
+    if vis_mode == 'lower_body':  # lower body is kept fixed to the input motion
+ colors[0] = colors_blue[0]
+ colors[1] = colors_blue[1]
+ elif vis_mode == 'gt':
+ colors = colors_blue
+
+ frame_number = data.shape[0]
+ # print(dataset.shape)
+
+ height_offset = MINS[1]
+ data[:, :, 1] -= height_offset
+ target_pose[:, :, 1] -= height_offset
+ trajec = data[:, 0, [0, 2]]
+
+ # Data is root-centered in every frame
+ data_copy = data.copy()
+ data[..., 0] -= data[:, 0:1, 0]
+ data[..., 2] -= data[:, 0:1, 2]
+ # Center first frame of target pose
+ # target_pose[:, :, 0] -= data_copy[0:1, :, 0]
+ # target_pose[:, :, 2] -= data_copy[0:1, :, 2]
+
+ # print(trajec.shape)
+
+ def update(index):
+ ax.clear()
+ # print(index)
+ # ax.lines = []
+ # ax.collections = []
+ ax.view_init(elev=120, azim=-90)
+ ax.dist = 7.5
+ # ax =
+ plot_xzPlane(MINS[0] - trajec[index, 0], MAXS[0] - trajec[index, 0], 0, MINS[2] - trajec[index, 1],
+ MAXS[2] - trajec[index, 1])
+
+ plot_obstacles(trajec[index])
+ plot_ground_target(trajec[index])
+
+ # ax.scatter(dataset[index, :22, 0], dataset[index, :22, 1], dataset[index, :22, 2], color='black', s=3)
+
+ # if index > 1:
+ # ax.plot3D(trajec[:index, 0] - trajec[index, 0], np.zeros_like(trajec[:index, 0]),
+ # trajec[:index, 1] - trajec[index, 1], linewidth=1.0,
+ # color='blue')
+ # # ax = plot_xzPlane(ax, MINS[0], MAXS[0], 0, MINS[2], MAXS[2])
+
+ # TODO: if GMD:
+ # Now only use orange color. Blue color is used for ground truth condition
+ # used_colors = colors_orange
+
+ used_colors = colors_blue if index in gt_frames else colors
+
+ for i, (chain, color) in enumerate(zip(kinematic_tree, used_colors)):
+ if i < 5:
+ linewidth = 4.0
+ else:
+ linewidth = 2.0
+ # print("i = ", i, data[index, chain, 0], data[index, chain, 1], data[index, chain, 2])
+ ax.plot3D(data[index, chain, 0], data[index, chain, 1], data[index, chain, 2], linewidth=linewidth,
+ color=color)
+ # print(trajec[:index, 0].shape)
+ if traj_only:
+ ax.scatter(data[index, 0, 0], data[index, 0, 1], data[index, 0, 2], color=color)
+ # Test plot trajectory
+ # plot_trajectory(trajec[index])
+
+ def plot_root_horizontal():
+ ax.plot3D(trajec[:index, 0] - trajec[index, 0], np.zeros_like(trajec[:index, 1]), trajec[:index, 1] - trajec[index, 1], linewidth=2.0,
+ color=used_colors[0])
+
+ # plot_ref_axes(trajec[index])
+
+ plot_root_horizontal()
+
+
+ plot_target_pose(target_pose, gt_frames, data_copy[index, 0, :], colors_blue, kinematic_tree)
+
+ plt.axis('off')
+ ax.set_xticklabels([])
+ ax.set_yticklabels([])
+ ax.set_zticklabels([])
+
+ ani = FuncAnimation(fig, update, frames=frame_number, interval=1000 / fps, repeat=False)
+
+ # writer = FFMpegFileWriter(fps=fps)
+ ani.save(save_path, fps=fps)
+ # ani = FuncAnimation(fig, update, frames=frame_number, interval=1000 / fps, repeat=False, init_func=init)
+ # ani.save(save_path, writer='pillow', fps=1000 / fps)
+
+ plt.close()
diff --git a/data_loaders/custom/utils/plotting.py b/data_loaders/custom/utils/plotting.py
new file mode 100644
index 0000000..92eb26d
--- /dev/null
+++ b/data_loaders/custom/utils/plotting.py
@@ -0,0 +1,145 @@
+import os
+import numpy as np
+import utils.paramUtil as paramUtil
+from utils.plot_script import plot_3d_motion
+from argparse import ArgumentParser
+
+
+def plot_samples(motions, gt_motions, lengths, texts, out_path, all_observed_masks=None):
+ fps = 10 # TODO: only for debugging purposes, reduce fps. Remove line later.
+ skeleton = paramUtil.t2m_kinematic_chain
+ for sample_i in range(motions.shape[0]):
+ caption = 'GT Motion - {}'.format(texts[sample_i])
+ length = int(lengths[sample_i])
+ motion = gt_motions[sample_i].numpy().transpose(2, 0, 1)[:length]
+ save_file = 'gt_motion{:02d}.mp4'.format(sample_i)
+ animation_save_path = os.path.join(out_path, save_file)
+ rep_files = [animation_save_path]
+ print(f'[({sample_i}) "{caption}" | -> {save_file}]')
+ plot_3d_motion(animation_save_path, skeleton, motion, title=caption,
+ dataset='humanml', fps=fps, vis_mode='gt')
+
+ caption = 'Sample - {}'.format(texts[sample_i])
+ motion = motions[sample_i].numpy().transpose(2, 0, 1)[:length]
+ save_file = 'sample{:02d}_rep{:02d}.mp4'.format(sample_i, 0)
+ animation_save_path = os.path.join(out_path, save_file)
+ rep_files.append(animation_save_path)
+ print(f'[({sample_i}) "{caption}" -> {save_file}]')
+
+ gt_frames = np.where(all_observed_masks[sample_i, 0, 0, :])[0] if all_observed_masks is not None else []
+ plot_3d_motion(animation_save_path, skeleton, motion, title=caption,
+ dataset='humanml', fps=fps, vis_mode='in_between', gt_frames=gt_frames)
+
+ all_rep_save_file = os.path.join(out_path, 'sample{:02d}.mp4'.format(sample_i))
+ ffmpeg_rep_files = [f' -i {f} ' for f in rep_files]
+ hstack_args = f' -filter_complex hstack=inputs={1+1}'
+ ffmpeg_rep_cmd = f'ffmpeg -y -loglevel warning ' + ''.join(ffmpeg_rep_files) + f'{hstack_args} {all_rep_save_file}'
+ os.system(ffmpeg_rep_cmd)
+ print(f'[({sample_i}) "{caption}" | all repetitions | -> {all_rep_save_file}]')
+ abs_path = os.path.abspath(out_path)
+ print(f'[Done] Results are at [{abs_path}]')
+
+
+def plot_sample(motions, gt_motions, lengths, out_path):
+ fps = 10 # TODO: only for debugging purposes, reduce fps. Remove line later.
+ skeleton = paramUtil.t2m_kinematic_chain
+ for idx in range(motions.shape[0]):
+        save_path = os.path.join(out_path, f'sample_{idx}.mp4')
+        gt_save_path = os.path.join(out_path, f'gt_sample_{idx}.mp4')  # separate file so the sample is not overwritten
+        length = int(lengths[idx])
+        motion = motions[idx].numpy().transpose(2, 0, 1)[:length]
+        gt_motion = gt_motions[idx].numpy().transpose(2, 0, 1)[:length]
+        plot_3d_motion(save_path, skeleton, motion, dataset='humanml', title='Sampled Motion', fps=fps)
+        plot_3d_motion(gt_save_path, skeleton, gt_motion, dataset='humanml', title='GT Motion', fps=fps)
+
+
+def plot_conditional_samples(motion, lengths, texts, observed_motion, observed_mask, num_samples, num_repetitions, out_path, edit_mode='benchmark_sparse', stop_imputation_at=0):
+ '''
+ Used to plot samples during conditionally keyframed training.
+ Arguments:
+ motion {torch.Tensor} -- sampled batch of motions (nreps, nsamples, 22, 3, nframes)
+ lengths {torch.Tensor} -- motion lengths (nreps, nsamples)
+ texts {torch.Tensor} -- texts of motions (nreps * nsamples)
+ observed_motion {torch.Tensor} -- ground-truth motions (nsamples, 22, 3, nframes)
+ observed_mask {torch.Tensor} -- keyframes mask (nsamples, 22, 3, nframes)
+        stop_imputation_at {int} -- if any replacement, set to 0; otherwise a value larger than 0
+    Returns:
+        None -- the per-sample animations are written as .mp4 files under out_path
+ '''
+
+ dataset = 'humanml'
+ batch_size = num_samples
+
+ fps = 10 # TODO: only for debugging purposes, reduce fps. Remove line later.
+ skeleton = paramUtil.t2m_kinematic_chain
+ for sample_i in range(num_samples):
+ caption = 'Input Motion'
+ length = lengths[0, sample_i]
+ gt_motion = observed_motion[sample_i].transpose(2, 0, 1)[:length]
+ save_file = 'input_motion{:02d}.mp4'.format(sample_i)
+ animation_save_path = os.path.join(out_path, save_file)
+ rep_files = [animation_save_path]
+ print(f'[({sample_i}) "{caption}" | -> {save_file}]')
+ plot_3d_motion(animation_save_path, skeleton, gt_motion, title=caption,
+ dataset=dataset, fps=fps, vis_mode='gt',
+ gt_frames=np.where(observed_mask[sample_i, 0, 0, :])[0])
+ for rep_i in range(num_repetitions):
+ caption = texts[rep_i * batch_size + sample_i]
+ if caption == '':
+ caption = 'Edit [{}] unconditioned'.format(edit_mode)
+ else:
+ caption = 'Edit [{}]: {}'.format(edit_mode, caption)
+ length = lengths[rep_i, sample_i]
+ gen_motion = motion[rep_i, sample_i].transpose(2, 0, 1)[:length]
+ save_file = 'sample{:02d}_rep{:02d}.mp4'.format(sample_i, rep_i)
+ animation_save_path = os.path.join(out_path, save_file)
+ rep_files.append(animation_save_path)
+ print(f'[({sample_i}) "{caption}" | Rep #{rep_i} | -> {save_file}]')
+ vis_mode = edit_mode if edit_mode in ['upper_body', 'pelvis', 'right_wrist', 'pelvis_feet', 'pelvis_vr'] else 'benchmark_sparse'
+ gt_frames = [] if edit_mode in ['upper_body', 'pelvis', 'right_wrist', 'pelvis_feet', 'pelvis_vr'] else np.where(observed_mask[sample_i, 0, 0, :])[0]
+ plot_3d_motion(animation_save_path, skeleton, gen_motion, title=caption,
+ dataset=dataset, fps=fps, vis_mode=vis_mode,
+ gt_frames=gt_frames)
+
+ # Credit for visualization: https://github.com/EricGuo5513/text-to-motion
+
+ all_rep_save_file = os.path.join(out_path, 'sample{:02d}.mp4'.format(sample_i))
+ ffmpeg_rep_files = [f' -i {f} ' for f in rep_files]
+ hstack_args = f' -filter_complex hstack=inputs={num_repetitions+1}'
+ ffmpeg_rep_cmd = f'ffmpeg -y -loglevel warning ' + ''.join(ffmpeg_rep_files) + f'{hstack_args} {all_rep_save_file}'
+ os.system(ffmpeg_rep_cmd)
+ print(f'[({sample_i}) "{caption}" | all repetitions | -> {all_rep_save_file}]')
+
+ abs_path = os.path.abspath(out_path)
+ print(f'[Done] Results are at [{abs_path}]')
+
+
+if __name__ == "__main__":
+ parser = ArgumentParser()
+ parser.add_argument("--saved_results_dir", type=str, required=True)
+ parser.add_argument("--output_dir", type=str, required=True)
+ args = parser.parse_args()
+
+ results = np.load(os.path.join(args.saved_results_dir, 'results.npy'), allow_pickle=True).item()
+
+ motion = results['motion']
+ texts = results['text']
+ lengths = results['lengths']
+ num_samples = results['num_samples']
+ num_repetitions = results['num_repetitions']
+ observed_motion = results['observed_motion']
+ observed_mask = results['observed_mask']
+
+ if not os.path.exists(args.output_dir):
+ os.makedirs(args.output_dir)
+
+ plot_conditional_samples(motion=results['motion'],
+ lengths=results['lengths'],
+ texts=results['text'],
+ observed_motion=results['observed_motion'],
+ observed_mask=results['observed_mask'],
+ num_samples=results['num_samples'],
+ num_repetitions=results['num_repetitions'],
+ out_path=args.output_dir,
+ edit_mode='benchmark_sparse', #FIXME: only works for selected edit modes.
+                             stop_imputation_at=0) #FIXME: set to 0 for now to always replace with ground-truth keyframes --> mainly for visualization purposes.
diff --git a/data_loaders/custom/utils/utils.py b/data_loaders/custom/utils/utils.py
new file mode 100644
index 0000000..8ffd2b4
--- /dev/null
+++ b/data_loaders/custom/utils/utils.py
@@ -0,0 +1,168 @@
+import os
+import numpy as np
+# import cv2
+from PIL import Image
+from utils import paramUtil
+import math
+import time
+import matplotlib.pyplot as plt
+from scipy.ndimage import gaussian_filter
+
+
+def mkdir(path):
+ if not os.path.exists(path):
+ os.makedirs(path)
+
+COLORS = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
+ [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
+ [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
+
+MISSING_VALUE = -1
+
+def save_image(image_numpy, image_path):
+ img_pil = Image.fromarray(image_numpy)
+ img_pil.save(image_path)
+
+
+def save_logfile(log_loss, save_path):
+ with open(save_path, 'wt') as f:
+ for k, v in log_loss.items():
+ w_line = k
+ for digit in v:
+ w_line += ' %.3f' % digit
+ f.write(w_line + '\n')
+
+
+def print_current_loss(start_time, niter_state, losses, epoch=None, sub_epoch=None,
+ inner_iter=None, tf_ratio=None, sl_steps=None):
+
+ def as_minutes(s):
+ m = math.floor(s / 60)
+ s -= m * 60
+ return '%dm %ds' % (m, s)
+
+ def time_since(since, percent):
+ now = time.time()
+ s = now - since
+ es = s / percent
+ rs = es - s
+ return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
+
+ if epoch is not None:
+ print('epoch: %3d niter: %6d sub_epoch: %2d inner_iter: %4d' % (epoch, niter_state, sub_epoch, inner_iter), end=" ")
+
+ # message = '%s niter: %d completed: %3d%%)' % (time_since(start_time, niter_state / total_niters),
+ # niter_state, niter_state / total_niters * 100)
+ now = time.time()
+ message = '%s'%(as_minutes(now - start_time))
+
+ for k, v in losses.items():
+ message += ' %s: %.4f ' % (k, v)
+ message += ' sl_length:%2d tf_ratio:%.2f'%(sl_steps, tf_ratio)
+ print(message)
+
+def print_current_loss_decomp(start_time, niter_state, total_niters, losses, epoch=None, inner_iter=None):
+
+ def as_minutes(s):
+ m = math.floor(s / 60)
+ s -= m * 60
+ return '%dm %ds' % (m, s)
+
+ def time_since(since, percent):
+ now = time.time()
+ s = now - since
+ es = s / percent
+ rs = es - s
+ return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
+
+ print('epoch: %03d inner_iter: %5d' % (epoch, inner_iter), end=" ")
+ # now = time.time()
+ message = '%s niter: %07d completed: %3d%%)'%(time_since(start_time, niter_state / total_niters), niter_state, niter_state / total_niters * 100)
+ for k, v in losses.items():
+ message += ' %s: %.4f ' % (k, v)
+ print(message)
+
+
+def compose_gif_img_list(img_list, fp_out, duration):
+ img, *imgs = [Image.fromarray(np.array(image)) for image in img_list]
+ img.save(fp=fp_out, format='GIF', append_images=imgs, optimize=False,
+ save_all=True, loop=0, duration=duration)
+
+
+def save_images(visuals, image_path):
+ if not os.path.exists(image_path):
+ os.makedirs(image_path)
+
+ for i, (label, img_numpy) in enumerate(visuals.items()):
+ img_name = '%d_%s.jpg' % (i, label)
+ save_path = os.path.join(image_path, img_name)
+ save_image(img_numpy, save_path)
+
+
+def save_images_test(visuals, image_path, from_name, to_name):
+ if not os.path.exists(image_path):
+ os.makedirs(image_path)
+
+ for i, (label, img_numpy) in enumerate(visuals.items()):
+ img_name = "%s_%s_%s" % (from_name, to_name, label)
+ save_path = os.path.join(image_path, img_name)
+ save_image(img_numpy, save_path)
+
+
+def compose_and_save_img(img_list, save_dir, img_name, col=4, row=1, img_size=(256, 200)):
+ # print(col, row)
+ compose_img = compose_image(img_list, col, row, img_size)
+ if not os.path.exists(save_dir):
+ os.makedirs(save_dir)
+ img_path = os.path.join(save_dir, img_name)
+ # print(img_path)
+ compose_img.save(img_path)
+
+
+def compose_image(img_list, col, row, img_size):
+ to_image = Image.new('RGB', (col * img_size[0], row * img_size[1]))
+ for y in range(0, row):
+ for x in range(0, col):
+ from_img = Image.fromarray(img_list[y * col + x])
+ # print((x * img_size[0], y*img_size[1],
+ # (x + 1) * img_size[0], (y + 1) * img_size[1]))
+ paste_area = (x * img_size[0], y*img_size[1],
+ (x + 1) * img_size[0], (y + 1) * img_size[1])
+ to_image.paste(from_img, paste_area)
+ # to_image[y*img_size[1]:(y + 1) * img_size[1], x * img_size[0] :(x + 1) * img_size[0]] = from_img
+ return to_image
+
+
+def plot_loss_curve(losses, save_path, intervals=500):
+ plt.figure(figsize=(10, 5))
+ plt.title("Loss During Training")
+ for key in losses.keys():
+ plt.plot(list_cut_average(losses[key], intervals), label=key)
+ plt.xlabel("Iterations/" + str(intervals))
+ plt.ylabel("Loss")
+ plt.legend()
+ plt.savefig(save_path)
+ plt.show()
+
+
+def list_cut_average(ll, intervals):
+ if intervals == 1:
+ return ll
+
+ bins = math.ceil(len(ll) * 1.0 / intervals)
+ ll_new = []
+ for i in range(bins):
+ l_low = intervals * i
+ l_high = l_low + intervals
+ l_high = l_high if l_high < len(ll) else len(ll)
+ ll_new.append(np.mean(ll[l_low:l_high]))
+ return ll_new
+
+
+def motion_temporal_filter(motion, sigma=1):
+ motion = motion.reshape(motion.shape[0], -1)
+ # print(motion.shape)
+ for i in range(motion.shape[1]):
+ motion[:, i] = gaussian_filter(motion[:, i], sigma=sigma, mode="nearest")
+ return motion.reshape(motion.shape[0], -1, 3)
+
diff --git a/data_loaders/custom/utils/word_vectorizer.py b/data_loaders/custom/utils/word_vectorizer.py
new file mode 100644
index 0000000..68c5956
--- /dev/null
+++ b/data_loaders/custom/utils/word_vectorizer.py
@@ -0,0 +1,80 @@
+import numpy as np
+import pickle
+from os.path import join as pjoin
+
+POS_enumerator = {
+ 'VERB': 0,
+ 'NOUN': 1,
+ 'DET': 2,
+ 'ADP': 3,
+ 'NUM': 4,
+ 'AUX': 5,
+ 'PRON': 6,
+ 'ADJ': 7,
+ 'ADV': 8,
+ 'Loc_VIP': 9,
+ 'Body_VIP': 10,
+ 'Obj_VIP': 11,
+ 'Act_VIP': 12,
+ 'Desc_VIP': 13,
+ 'OTHER': 14,
+}
+
+Loc_list = ('left', 'right', 'clockwise', 'counterclockwise', 'anticlockwise', 'forward', 'back', 'backward',
+ 'up', 'down', 'straight', 'curve')
+
+Body_list = ('arm', 'chin', 'foot', 'feet', 'face', 'hand', 'mouth', 'leg', 'waist', 'eye', 'knee', 'shoulder', 'thigh')
+
+Obj_List = ('stair', 'dumbbell', 'chair', 'window', 'floor', 'car', 'ball', 'handrail', 'baseball', 'basketball')
+
+Act_list = ('walk', 'run', 'swing', 'pick', 'bring', 'kick', 'put', 'squat', 'throw', 'hop', 'dance', 'jump', 'turn',
+ 'stumble', 'dance', 'stop', 'sit', 'lift', 'lower', 'raise', 'wash', 'stand', 'kneel', 'stroll',
+ 'rub', 'bend', 'balance', 'flap', 'jog', 'shuffle', 'lean', 'rotate', 'spin', 'spread', 'climb')
+
+Desc_list = ('slowly', 'carefully', 'fast', 'careful', 'slow', 'quickly', 'happy', 'angry', 'sad', 'happily',
+ 'angrily', 'sadly')
+
+VIP_dict = {
+ 'Loc_VIP': Loc_list,
+ 'Body_VIP': Body_list,
+ 'Obj_VIP': Obj_List,
+ 'Act_VIP': Act_list,
+ 'Desc_VIP': Desc_list,
+}
+
+
+class WordVectorizer(object):
+ def __init__(self, meta_root, prefix):
+ vectors = np.load(pjoin(meta_root, '%s_data.npy'%prefix))
+ words = pickle.load(open(pjoin(meta_root, '%s_words.pkl'%prefix), 'rb'))
+ word2idx = pickle.load(open(pjoin(meta_root, '%s_idx.pkl'%prefix), 'rb'))
+ self.word2vec = {w: vectors[word2idx[w]] for w in words}
+
+ def _get_pos_ohot(self, pos):
+ pos_vec = np.zeros(len(POS_enumerator))
+ if pos in POS_enumerator:
+ pos_vec[POS_enumerator[pos]] = 1
+ else:
+ pos_vec[POS_enumerator['OTHER']] = 1
+ return pos_vec
+
+ def __len__(self):
+ return len(self.word2vec)
+
+ def __getitem__(self, item):
+ word, pos = item.split('/')
+ if word in self.word2vec:
+ word_vec = self.word2vec[word]
+ vip_pos = None
+ for key, values in VIP_dict.items():
+ if word in values:
+ vip_pos = key
+ break
+ if vip_pos is not None:
+ pos_vec = self._get_pos_ohot(vip_pos)
+ else:
+ pos_vec = self._get_pos_ohot(pos)
+ else:
+ word_vec = self.word2vec['unk']
+ pos_vec = self._get_pos_ohot('OTHER')
+ return word_vec, pos_vec
\ No newline at end of file
diff --git a/data_loaders/custom_utils.py b/data_loaders/custom_utils.py
new file mode 100644
index 0000000..08039e0
--- /dev/null
+++ b/data_loaders/custom_utils.py
@@ -0,0 +1,102 @@
+import numpy as np
+
+from data_loaders.custom.scripts.motion_process import fid_l, fid_r
+
+RIG_JOINT_NAMES = [
+ "Root",
+ "Spine",
+ "RightUpLeg",
+ "RightLeg",
+ "RightFoot",
+ "RightToe",
+ "RightToe_end",
+ "LeftUpLeg",
+ "LeftLeg",
+ "LeftFoot",
+ "LeftToe",
+ "LeftToe_end",
+ "Spine1",
+ "Spine2",
+ "Neck",
+ "Head",
+ "Head_end",
+ "LeftShoulder",
+ "LeftArm",
+ "LeftForeArm",
+ "LeftHand",
+ "LeftHand_end",
+ "RightShoulder",
+ "RightArm",
+ "RightForeArm",
+ "RightHand",
+ "RightHand_end"
+]
+
+NUM_RIG_JOINTS = len(RIG_JOINT_NAMES) # joints in the custom rig
+NUM_RIG_FEATURES = 12 * NUM_RIG_JOINTS - 1  # per-frame feature width for this rig (see the layout comment below)
+
+RIG_LOWER_BODY_JOINTS = [RIG_JOINT_NAMES.index(name) for name in ['Root', 'RightUpLeg', 'RightLeg', 'RightFoot', 'RightToe', 'RightToe_end', 'LeftUpLeg', 'LeftLeg', 'LeftFoot', 'LeftToe', 'LeftToe_end']]
+SMPL_UPPER_BODY_JOINTS = [i for i in range(len(RIG_JOINT_NAMES)) if i not in RIG_LOWER_BODY_JOINTS]
+RIG_LOWER_BODY_RIGHT_JOINTS = [RIG_JOINT_NAMES.index(name) for name in ['Root', 'RightUpLeg', 'RightLeg', 'RightFoot', 'RightToe', 'RightToe_end']]
+RIG_PELVIS_FEET = [RIG_JOINT_NAMES.index(name) for name in ['Root', 'LeftFoot', 'RightFoot']]
+RIG_PELVIS_HANDS = [RIG_JOINT_NAMES.index(name) for name in ['Root', 'LeftHand', 'RightHand']]
+RIG_PELVIS_VR = [RIG_JOINT_NAMES.index(name) for name in ['Root', 'LeftHand', 'RightHand', 'Head']]
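+# The three subsets above group pelvis + feet, pelvis + hands, and pelvis + hands + head
+# (the last presumably intended for VR-style three-point tracking).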
+
+# Recover global angle and positions for rotation data
+# root_rot_velocity (B, seq_len, 1)
+# root_linear_velocity (B, seq_len, 2)
+# root_y (B, seq_len, 1)
+# ric_data (B, seq_len, (joint_num - 1)*3)
+# rot_data (B, seq_len, (joint_num - 1)*6)
+# local_velocity (B, seq_len, joint_num*3)
+# foot contact (B, seq_len, 4)
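+#
+# For this 27-joint rig the per-frame feature count is therefore
+#   (1 + 2 + 1) + 26*3 + 26*6 + 27*3 + 4 = 323 = 12 * NUM_RIG_JOINTS - 1,
+# analogous to the 263 features (12 * 22 - 1) of the 22-joint HumanML3D rig.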
+RIG_ROOT_BINARY = np.array([True] + [False] * (NUM_RIG_JOINTS-1))
+RIG_ROOT_MASK = np.concatenate(([True]*(1+2+1),
+ RIG_ROOT_BINARY[1:].repeat(3),
+ RIG_ROOT_BINARY[1:].repeat(6),
+ RIG_ROOT_BINARY.repeat(3),
+ [False] * 4))
+RIG_LOWER_BODY_JOINTS_BINARY = np.array([i in RIG_LOWER_BODY_JOINTS for i in range(NUM_RIG_JOINTS)])
+RIG_LOWER_BODY_MASK = np.concatenate(([True]*(1+2+1),
+ RIG_LOWER_BODY_JOINTS_BINARY[1:].repeat(3),
+ RIG_LOWER_BODY_JOINTS_BINARY[1:].repeat(6),
+ RIG_LOWER_BODY_JOINTS_BINARY.repeat(3),
+ [True]*4))
+RIG_UPPER_BODY_MASK = ~RIG_LOWER_BODY_MASK
+
+RIG_LOWER_BODY_RIGHT_JOINTS_BINARY = np.array([i in RIG_LOWER_BODY_RIGHT_JOINTS for i in range(NUM_RIG_JOINTS)])
+RIG_LOWER_BODY_RIGHT_MASK = np.concatenate(([True]*(1+2+1),
+ RIG_LOWER_BODY_RIGHT_JOINTS_BINARY[1:].repeat(3),
+ RIG_LOWER_BODY_RIGHT_JOINTS_BINARY[1:].repeat(6),
+ RIG_LOWER_BODY_RIGHT_JOINTS_BINARY.repeat(3),
+ [True]*4))
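+# Minimal usage sketch (hypothetical file name, feature layout as described above):
+#   feats = np.load('some_motion.npy')        # assumed shape (seq_len, NUM_RIG_FEATURES)
+#   lower = feats[:, RIG_LOWER_BODY_MASK]     # root channels + lower-body joint channels + the 4 contact labels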
+
+
+# Matrices that map each joint to its corresponding columns in the 323-dim feature representation
+MAT_POS = np.zeros((NUM_RIG_JOINTS, NUM_RIG_FEATURES), dtype=bool)
+MAT_POS[0, 1:4] = True
+for joint_idx in range(1, NUM_RIG_JOINTS):
+ ub = 4 + 3 * joint_idx
+ lb = ub - 3
+ MAT_POS[joint_idx, lb:ub] = True
+
+MAT_ROT = np.zeros((NUM_RIG_JOINTS, NUM_RIG_FEATURES), dtype=bool)
+MAT_ROT[0, 0] = True
+for joint_idx in range(1, NUM_RIG_JOINTS):
+ ub = 4 + (NUM_RIG_JOINTS - 1)*3 + 6 * joint_idx
+ lb = ub - 6
+ MAT_ROT[joint_idx, lb:ub] = True
+
+MAT_VEL = np.zeros((NUM_RIG_JOINTS, NUM_RIG_FEATURES), dtype=bool)
+for joint_idx in range(0, NUM_RIG_JOINTS):
+    ub = 4 + (NUM_RIG_JOINTS - 1)*3 + (NUM_RIG_JOINTS - 1)*6 + 3 * (joint_idx + 1)
+ lb = ub - 3
+ MAT_VEL[joint_idx, lb:ub] = True
+
+MAT_CNT = np.zeros((NUM_RIG_JOINTS, NUM_RIG_FEATURES), dtype=bool)
+
+## Foot-contact joint indices differ between rigs, so fid_l and fid_r are imported from scripts/motion_process
+MAT_CNT[fid_l[0], -4] = True
+MAT_CNT[fid_l[1], -3] = True
+MAT_CNT[fid_r[0], -2] = True
+MAT_CNT[fid_r[1], -1] = True
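+
+# Sketch of how these matrices are consumed (see joint_to_full_mask_custom in utils/editing_util.py):
+# a boolean joint mask of shape (..., NUM_RIG_JOINTS), matrix-multiplied with MAT_POS / MAT_ROT /
+# MAT_VEL / MAT_CNT, marks the corresponding columns of the 323-dim per-frame feature vector.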
diff --git a/data_loaders/get_data.py b/data_loaders/get_data.py
index 3adf6c4..f0e9d70 100644
--- a/data_loaders/get_data.py
+++ b/data_loaders/get_data.py
@@ -21,6 +21,10 @@ def get_dataset_class(name):
elif name == "kit":
from data_loaders.humanml.data.dataset import KIT
return KIT
+ elif name == "custom":
+ print(f">>> (DEBUG) >>> Attempting to use {name} ...")
+ from data_loaders.custom.data.dataset import CustomRig as custom
+ return custom
else:
raise ValueError(f'Unsupported dataset name [{name}]')
@@ -33,6 +37,9 @@ def get_collate_fn(name, hml_mode='train'):
return t2m_collate
elif name == 'amass':
return amass_collate
+ elif name == "custom":
+ print(f">>> (DEBUG) >>> Using t2m_collate for the {name} dataset")
+ return t2m_collate
else:
return all_collate
@@ -55,7 +62,7 @@ class DatasetConfig:
def get_dataset(conf: DatasetConfig):
DATA = get_dataset_class(conf.name)
- if conf.name in ["humanml", "kit"]:
+ if conf.name in ["humanml", "kit", "custom"]:
dataset = DATA(split=conf.split,
num_frames=conf.num_frames,
mode=conf.hml_mode,
diff --git a/data_loaders/humanml/common/quaternion.py b/data_loaders/humanml/common/quaternion.py
index e2daa00..5051507 100644
--- a/data_loaders/humanml/common/quaternion.py
+++ b/data_loaders/humanml/common/quaternion.py
@@ -10,7 +10,7 @@
_EPS4 = np.finfo(float).eps * 4.0
-_FLOAT_EPS = np.finfo(np.float).eps
+_FLOAT_EPS = np.finfo(float).eps
# PyTorch-backed implementations
def qinv(q):
diff --git a/data_loaders/humanml/utils/plot_script.py b/data_loaders/humanml/utils/plot_script.py
index 428167c..1e225c1 100644
--- a/data_loaders/humanml/utils/plot_script.py
+++ b/data_loaders/humanml/utils/plot_script.py
@@ -180,9 +180,10 @@ def plot_target_pose(target_pose, frame_idx, cur_root_loc, used_colors, kinemati
# print(trajec.shape)
def update(index):
+ ax.clear()
# print(index)
- ax.lines = []
- ax.collections = []
+ # ax.lines = []
+ # ax.collections = []
ax.view_init(elev=120, azim=-90)
ax.dist = 7.5
# ax =
diff --git a/data_loaders/humanml_utils.py b/data_loaders/humanml_utils.py
index 40ba96c..0c9cdf9 100644
--- a/data_loaders/humanml_utils.py
+++ b/data_loaders/humanml_utils.py
@@ -65,28 +65,28 @@
# Matrix that shows joint correspondces to SMPL features
-MAT_POS = np.zeros((22, 263), dtype=np.bool)
+MAT_POS = np.zeros((22, 263), dtype=bool)
MAT_POS[0, 1:4] = True
for joint_idx in range(1, 22):
ub = 4 + 3 * joint_idx
lb = ub - 3
MAT_POS[joint_idx, lb:ub] = True
-MAT_ROT = np.zeros((22, 263), dtype=np.bool)
+MAT_ROT = np.zeros((22, 263), dtype=bool)
MAT_ROT[0, 0] = True
for joint_idx in range(1, 22):
ub = 4 + 21*3 + 6 * joint_idx
lb = ub - 6
MAT_ROT[joint_idx, lb:ub] = True
-MAT_VEL = np.zeros((22, 263), dtype=np.bool)
+MAT_VEL = np.zeros((22, 263), dtype=bool)
for joint_idx in range(0, 22):
ub = 4 + 21*3 + 21*6 + 3 * (joint_idx + 1)
lb = ub - 3
MAT_VEL[joint_idx, lb:ub] = True
-MAT_CNT = np.zeros((22, 263), dtype=np.bool)
+MAT_CNT = np.zeros((22, 263), dtype=bool)
MAT_CNT[7, -4] = True
MAT_CNT[10, -3] = True
MAT_CNT[8, -2] = True
-MAT_CNT[11, -1] = True
\ No newline at end of file
+MAT_CNT[11, -1] = True
diff --git a/dataset/HumanML3D_abs/motion_representation.ipynb b/dataset/HumanML3D_abs/motion_representation.ipynb
index 1458ca9..8d3ef85 100644
--- a/dataset/HumanML3D_abs/motion_representation.ipynb
+++ b/dataset/HumanML3D_abs/motion_representation.ipynb
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -21,7 +21,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -53,7 +53,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -244,7 +244,7 @@
},
{
"cell_type": "code",
- "execution_count": 40,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -440,7 +440,7 @@
},
{
"cell_type": "code",
- "execution_count": 28,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -514,7 +514,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -525,96 +525,9 @@
},
{
"cell_type": "code",
- "execution_count": 41,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 1%|▍ | 358/29232 [00:17<23:11, 20.75it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "011059.npy\n",
- "cannot reshape array of size 0 into shape (0,newaxis)\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 85%|███████████████████████████████▌ | 24903/29232 [20:17<03:26, 21.01it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "M011059.npy\n",
- "cannot reshape array of size 0 into shape (0,newaxis)\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 89%|████████████████████████████████▊ | 25957/29232 [21:12<02:31, 21.59it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "009707.npy\n",
- "cannot reshape array of size 0 into shape (0,newaxis)\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 98%|████████████████████████████████████▏| 28591/29232 [23:25<00:31, 20.05it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "M009707.npy\n",
- "cannot reshape array of size 0 into shape (0,newaxis)\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|█████████████████████████████████████| 29232/29232 [23:57<00:00, 20.34it/s]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Total clips: 29232, Frames: 4117392, Duration: 3431.160000m\n"
- ]
- },
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAD4CAYAAADhNOGaAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAABl8klEQVR4nO2deZwcVbX4v6e7Z98yM1nIHlC2oE8keaiouCT6w4CI8EB9SWQRQhJQ1Cgg8BQ1cVcEMQmIBHTmuSCgCEEgEVyeCwYFIUFEWZJAyJ7MksnMdPf5/VHdPdXVtfU2S+Z+P5+a6aq6y6ntnHvP3URVMRgMBsPoJTLUAhgMBoNhaDGGwGAwGEY5xhAYDAbDKMcYAoPBYBjlGENgMBgMo5zYUAtQCGPHjtUZM2YMtRgGg8Ewonjsscd2qeo45/ERaQhmzJjBhg0bhloMg8FgGFGIyItux41ryGAwGEY5xhAYDAbDKMcYAoPBYBjlGENgMBgMoxxjCAwGg2GUYwyBwVAAOzoOcs5Nf2RH58GhFsVgKBpjCAyGfGlvJ/aqI/jx4jcTO+IIaG8PjtP5Cqx5D3RuL798BkOeGENgMOTBsjMuJ77ww7Ts2kYEpWXXNuILP8yyMy73jrR0KYyZBBf8CsZMtPZd2N7bzxl/fZYdvf1lkt5gcKckhkBEbhWRHSLylO1Yi4g8JCLPpv43e8Q9NxXmWRE5txTyGAzF4Of2+fza7xDTZNaxmCb5/NrvuCf2qhisWgXJ1LofSbX231CdHa69nYrDD+eu2UcTO/zwcLUMg6FElKpGcBtwiuPYlcB6VT0SWJ/az0JEWoDPAW8ATgQ+52UwDIZBIcDtU9fv3ibgery9HZ5LuOfzl97Mz49d82V6zzuflm0vEVGlZdtL9J53Ph+75stZUfp37OCFBQuJ79yZ3zUZDAGUxBCo6m+BPY7D7wNuT/2+HTjDJer/Ax5S1T2quhd4iFyDYjCUhM7OTtasWUNnZ6fr+WVnXE7fuedluX36zj3P1e2TjIEKJKM+GS5a6H3OtjDgtd/5GlXxbHdQVbyfa7/ztaxju1auouexx9j53ZU+mRoM+VPONoIJqrot9fsVYIJLmMnAFtv+1tSxHERkkYhsEJENO02JyFAAv/nNb9i8eTO/+c1vXM9f/eBNVCbiWccqE/GM2yfR0cfOD91Aoq6FSBxEIeJR4AfggPcysPYzzR37XMOkj780bRp9FRUc9vlredW/niV50008fcyx/ON1x/tkbjCEZ1AmnVNVFZGiFkdW1ZuBmwFmz55tFlo2hGb58uXE43Fe++STXLZ+PU3797OvqYmH3/Uu3n/HHZlwzT0drvHr+g9Cezs9G7bTct/XiHTvRSWC2NoK+hsjVAbIsaOumUtPv5wb7/kq47r3hRO+vZ1Ju3YhcctAVcbjTNz+CjUnvJ7G224Ll4bBEEA5awTbRWQiQOr/DpcwLwFTbftTUscMhpJx2WWX8Z69e3nvL3/JmP37EWDM/v2cceednj147Ly07CF2//gZam+8iljHdgRFNEkyVpUJU9GRzI1YV5e1G0vE+fGPriKWsFUjfBqFk0D/+ecjPT1ZxyOq1D/4ILFxObMJGwwFUU5DcA+Q7gV0LvALlzAPAO8WkeZUI/G7U8cMhpLR0NDA0bffTmV/th9eVNFVqzLKWFpbXeNPXP1Bmn57M5F4b9bxSLyXeMN474z7ugFYNu/jHIhV0XKw02p7ONhJT6yKZWdczp6llyIe0SNArN+9K2lFPO563GAohFJ1H/0R8EfgaBHZKiIfAb4CvEtEngXmpvYRkdkicguAqu4Bvgj8JbV9IXXMYCgpTfv3ux4X4MCiRdbO9dfj5nOMdu8m2uE+ECza6VbRTdEP3bUtvKHicLSiNutUbbyXTz58m2f7AHgbJoOh1IjqyHO3z549W83CNIa8mDEDXnRdkwMFpK0N5s8nKeJaOnK2CUDa538FN97zFcZ178ukkUGEnqpGqns7OVjVQE1vhyNNAVXXGoECiUiEWNLF5ZSWeQR+u4ahRUQeU9XZzuNmZLFhdLBihWtpH6xawb5LLsn8dkVzFXLrgX385EdX0nrAqm3suXAJyWmT6V7zIx6cexVJiVLT24Gg1PR2kJQo69726YF8p03zFTnqYQQAZPp037gGQz4YQ2AYHcyfT2LReZ7GwMt1lEEGPpW2152CAtFUaT6aKplvGnc4kS0vE1mymHc+/A0imt23NKIJTv6/G4FU99EVK/K6hM76etacey6ddXV5xzUY/DCGwDBqeOXVezwNgYpnXSAVYKB0/sG/P5hTcxDgzVv+jgI1vR1Ek16NvFYPIIVsN1IIfnPyyWyeNo3fvO1tecc1GPwYkYvXGwz58PAjMxn34C6O/dYuT9ePqPrP7yORjDGIuriJwDIGXRXVVKpQGe9xDWMPCylfv8t5BXpqmqnr2cvyq67i2H/8gznr13Pq2rXsb2rizrPO4unXv55rrrnGNx+DIQymsdhwyNPbu4PktMnU7PDvctldU8PTK8bw2i/uoGpvtlvHS2E7SX9NQWHTjb0q4mkIfvuWj/G2399AXzRKRSKRFS5eWUn/ypXUfOQjIaQyGCxMY7Fh1FJVNZ5qmxHwKvrU9vRw3Od2ULEvSV/UUVmuFzi3FuosddxV28Ld7/0W3TXh50hUIG1e3OoU3Y40eyvrOFDVkGMEAGJ9ffQuW+Y7d5LBEBZjCAyHNKsvfYQH51yVVUT3dA8BvVJHRJXKRDzbYCQUvf1AZjrp2gN7OeOXn6SmZ19e8qTnqIsAu3p2ZZ2rcaT5rl9/mb4q9xoDQFNHh+/cSQZDWIwhMBzSnHf8i8z5/XU51QDP3kMdHXRXW1NHCKCnV0EEpCdlQHqACEROr0SACIoCTx5zal5yddfUUjPR6j765DGnoqm00mkCPHXMqTR1uM9/ZF2Ectl119G7Zg3XXnsty5cvz0sGgyGNaSM4VGlvJ37FFURfeonElCnEvvKVUdfTJNHRx4HJU2no2pHj40/iXgpKinCwIkZtX6rXTwXg1gHIcVwRJGUUwLsB2NlIrKm4ERfTlERArLmF/NKKRyL019VR3dk5ap+1IRymjWA00d4OixYRe+klBIht3YouWABz5w61ZGXBdcGW9nZ41eHUd1lTQLh193RDVKnt60eBF8a2uBsBcDleeIFKPOJak9sFpxtLJqnp7Mw8ay64YNStcBa01oTBH2MIDkH2XXIJHDiQdUwAXb8ejjtuaIQqI84FW3af8Vl04blEd72cUfj7K2uz1K1fO8G+xkYEmL5raKe96qmp8ZUToD/qsjJOXx/dF11E9/7e3HPloPMVWPMe6HSfj6msLF2KxmLUNzby4Y98hF0f/ODgy3AIYAzBIYiXX1kA3bRp0IxB1949/OTaK+net7cs6f/jdcfz9DHHkrzpJl71r2c57PPX0ldRwcE/fQ+xjepV4OtvO5c91Q1Z8f3aCWDAdeNJTTHSW/THvJV9TY/7WAS7TFlTWtuo7enhL/c+X5xwQbS3w7Sp0DgRPvEAfP688ubnyDt
ZVYWuWoWkelVFVZmxdi1//s//NO0l+aKqI26bNWuWGnyYPl0VPLckaCIaVW1rK6sYjy34gO6rrbHymzKl5Pn1bd+uu+fN04RIzvWlfx+17C6dfsW9+tHTlumu6oascwmP+2NPzyuM2z1V0ANVjYFh7L+TIdL0Oh4Ud3/9eH3gnZ/RVZc8XNL7rqrWs6xw5FuB6pnVql8cV/r87MyZ43vtcRHt6OgorwwjFGCDuujUIVfqhWzGEATQ1ub7oWQplCVLSp79dfPP0HvfcLz2RaNZ+fWVwfj0xmKu19aXOr69rllvO36edseqchR7kKINCleIcg4bfm+jt0HJR654ZbX2fO+2kt5zbWtTFY88m0S145XS5mdn5szA605C+fIf4RhDMNoIKDWV0xh07tmt+2prXPNLiJTUGIRR5lsax+WlPBW0u8aS/8D4aKjwYQxBPuGDjjuNrF/cjvoJJbvf2tamWlvrfW1SRiUcwggoaCISKZ8MIxwvQ2DaCA5V1q1DZs4M7MsiYK3SFWLJxrDUN7fQeMDdvx1RRRcuLGl+QUzuGOhN5PTHe92ftH++eoff6vThCXoOOeE9JsFLp+PVNuBGfbfP4jn5smhhTkeELFprvc8Vw3HHoZs2hZq6I3LxxeWR4RCmrIZARI4WkcdtW4eIfNwR5u0ist8W5rPllGlUsXHjkBmDnjFN3vmpZi0RWQ7sCiMh2a+5eoRzxt/X2Ij6fCEK9Da79NoJifrlr+5PLd2A7acQc7rKBqx7EIr2dmhtgQM+b1NlFL59c/F5OfJNVlWFNgIyZw6sXFlaGUYDbtWEcmxYo+tfAaY7jr8duDeftIxrKE9CVqmToDpnTmnybGvT/ooK3/wOVFUWn09Aw3hfLBbo8/dzwwT5/JNSmKvHL22/9oFQ/nHnfrGuuCB3EKiWo/PBkiX5udrK0N51qMEwcA3NAf6tqu7rBRrKx8aNyJw54WoG69eXZuDZ/PnE1qwBt37uKap7+4qvFaxYQTLmPZt6LB4PHEzmdV+8Zga1pyNqxd96aj09VY2hZij1kiONV/ff3ooK3/TdagrS2lr8KOMgdxDA7beXdjRze7vVNTQgmALEYtYyoaYmUDhu1qEcG3ArcKnL8bcDu4EngPuB4zziLwI2ABumTZtWNot5SNPWpuroajkYNQO/Ul1JGo/b2qwSqU8J2a8W4FeKj0dDlkglv66gxXQbDbuVrDYQlFdra3F5uOQZugY7c2Zp8z7EYSh7DQGVwC5ggsu5RqA+9Xse8GxQesY1VAT5fGQOY9C5Z7f++HNXaNfePfnlGVTFr60ticLyyqM/EilYqQaGqxgIl4+iztct5NdLyDO9YhW0XzfR9FZZYpdQPoUVYwTyZqgNwfuAB0OGfQEY6xfGGIIiCel7TWL1u49Pm6ba1qYPfe9GvfeNx2tvXW1G8SWam8MpgtZW37xKUTNwu6akYytE4ae7kvpt2+ua9ewPfVm3140JlXah3UbDniu6NhCmXaC1NVwebW2anDZFk4IePKxG+25blX2uuTnzfIIMXubaSlVjHWUMtSH4MXC+x7nDGJgF9URgc3rfazOGoASENAbpD9OuTJ2jbUMZhBCNx3kZFjccDcf76+v11nPP1Y66uryVa17hKtDd1Q2aQHR3dUN4RR3yeBhDlGOYphyhZ6/+g27v6Al9++L7e3X76sc1ccttwTWBMLWNtjZNtjTnXFMiJp7GOVRjuGkULpghMwRAXaoNoMl2bDGwOPX7UmBjqo3gT8BJQWkaQ1AiQhiDoB41zpG6vorcx5eftdXW6o4frtZz7z9Xdx7YGXgZaZdVz803ZbUJHKyo8JRTcTdoXjJ5KeNPzvu49kazRzf3RmP6yXkfz/u+ermFAnsuOa4lLhFNILqlcZz+5ONf9r95bW2anDpNk4jGx07Sjtefocmqav/n4+cOSpf+A+Q+MD6mv147I/R1Zs6bmkBRDGmNoNSbMQQlpK1Ntbq6YLfJvspa3V43xnIhpXy7vm6jEDUDBb3mgil6+aIpumdCoyYFjU/1nqvovhu/ofe+8Xg9kHIx3HX66drvYnASjuvJt6Tuds6tBmA/7nfvtow/LHTeYWRKuITtjlXpR09bpkddvXbghnVsU731FNXbvpvj/knW+D8Xz26iLqX/IHfZweZo4DVmnTdtAkVjDIHBn8rK4A/RtvXEotoTjeaUhtPKKW7bzzEIbW3aU1mhXVVVOaXZE26eqa+57TV6+aIpeqAyu9Gwt070YHPUymNsi649aZZ+45xTc+Y18uuHb3d1BRkj5+aWbhjl7TURXY7LxKWR9KCP0Qx7DVsaxw24iJYsUZXsMRBht6RbTcDFAKQNUhJ0d02D/uQ1c3KfQ0Mk9HWYmkDpMIbA4E+I3kT280ctuytrDh+v8PZSeMYgtLVpPJZrQBKg+2tEL794ir7UmqsAE5Hc9J9/8xtz5jUKW7vxKuXnUzIPE7aYtogw19Ivub2issKJWM94rEdvnBPCNdA+N2/ewLvicP/Ylb8zbm80ZtVKlt2lChqvFH3xzMZQ98YYgdJiDIEhmBAT1SVBD0Yiur0utxEwyCAkQK876xTPCenSYXui2bWEX6+dob2N7squkAZHe63AbZppr6mnS2kInI2lbrWNoBHGbq4gt21XbZO/zz5kzaA/GtWfnXmmfvGqq/K630qqVuJwERkjMPh4GQKzZrEhm7lz0fXrQ41g3VtdT/PBrtBJKwNrBYcZMaqpsL0tUSr3JDzjJCOW5ohqdnyv8Olz6eBuI4294h6oqaHWY8EYJ3uqG2g52OmaXjrv3x0/m5Mf3+AZJugaguiNVlCV8FpvM3/2NTURjcep6+7O5B9GjrS8PeNj7PrPaqbc1xX8js2ZA+vWFSuywYZZs9gQjnXrAqejSCvRl+tbSeaRtGBNOAUExhOsl1OBij3+M21K0jICaUOTxusaBGuJx6BlIN1wWzXMK5/mg52Bcrz1catAk/SYbdSNMEYgbUiLNgKOGUKa9u+nvrs7Y8zDSr2nphGA6h3xQCMAIJWVxggMIsYQGHIJaQxm7rKmjcq3TmlX8k7l7RU24bMspH1GzojtmB/2aZzd5PdT3vsaGwNSz40TRMRRMw9b4ncjq6ZT+OSoFg4bfKC2Nm+5+iVKa8/A/ElB8ZMicOuteeZiKAZjCAzurFuHLFkSaAycL1A+RiFdorQbBa/4sZ780g5KL10r8JPNK659UriXGsYVVLPIcQPZagT7Gv0nr/M71xuN8fz8JvrS02OXZjkFK6lIhLqgyedspO9fVBMkCTZuClBfT+SHPyztBHaGQIwhMHizcmWgMYABpen0veeD2DavWoJf2s5zbkbKSSy16HlnfT1rzj2Xzrq6rPNeedmV9qTOne5hfNJxyqlkrz/gNfuor0yp/5WJOM+f18LzC8cU/nU77GNGoSfDOQLtNRJ7TS2wTWDJEujsNEZgCDCGYJizo+Mg59z0R/Z//zYS06aRlAiJadPKuqhLFnkYA8gu9RXaDcFeS3CqnqCScjKae8wvfAJo6OrivNtvp767OyvPMIvGOBe9SWOXO5
1OV3UVP3n7G+murgIGjGdPTU3GID3y1rd6yutVonYeP2n+Fib/Yh+aTwOOncRAunb5g/Az0kHxZMmSQZ1GOtHRx46bniDR2TdoeQ5njCEY5tyw/lkm3ncnNZcsIbplCxGU6JYt6MKFJEVIto4l2dpaXgORpzFwcwMUWktwa0vwdVclco2HV/i/nHBCVqOn3VUVlqiHtnWmkQTqDvZyyqOP8/yEsQPyYjVAJ4H6ri6Of+KJvHzwvdFYTviqHXHqX/TuZRVEsQZAliyB1Psy6EagvR2dNh2VCDptOixdOrDf2oq2jkUlAq86nOh9d9KxbnNp8h3puPUpHe7baBhHcNTVa3X6Fffq9CvuDRy4ldX3WsTqs9/SqomWFk0gWb/jU6cWPqlbW5tqa2uofuNJUBXJu49/qHRDhAkTLh4w3bHbSN/0lp57KI57GPtxZzpxEb33DcfrdWedon3RqD4/dVpmSox8B5/Z/4cdV1CKLWecRH19ZrBgoqUl/DtS6ARytjmSki0tmmxpTY2WDp7COvNcYlW667T/0S1X/Fa3XP27wuQYYWAGlI0stu/v0UU/+IseceW9mvBQNgV/xCkFnYxGrTmC8jUO+UxjXVWVozjclEm+Ssht0JdzkFYSfwUZZkBT0ICxoPO9HpPs9cai2hOLan/UmjLjpYZWz3l5gmQNa/h8rzXEAjyuRj1tANLvRdi1BMJOYa1qU/rWO5uvwvfb+hsn6K4fPa3xjt78P9IRiJchMK6hYcr4xmrG1leRBLY1jitp2qJquUESCSJAdMsWuOCCjIsp0N2Uh6so0tsLtbVW9b++3nPh+KC0ctJ1ieNscHa6etzCl5uKhHu3na7aOqriCWKJBAmJcFjnbr5137d44OiTfNNTx+/0ftHX4mgXcMszq0G+tdVaHrLTGiuRbG21lpZU/yeppFxBu3blNgrb3ToZN46gCxciWzZn3llridB83hhvoh07iFTFiDZUliS9EYubdRju22ioEaiqLvrBX/Tqu5/ULd/5nh6oqCpJCaigrbLS3bWUx+Li8fTCMyn3klcpN9+SrVftwLMEm0d+QbLsbWzMTK6Xc70BaWRqDJHcKazt8/I44/jKVFHAs41my+M1NUba1ZfjygmzgI09jTlzXFw61u+giQ/LscXHTdKdP9g4+B/3EIFxDY1w2toyC68Mlh/Y84O2tUP0jxmvicq6cK4ipyLxMQqlMgihfdU+x/3OB51zbsrA0pmeU1jXNGS5iJwK2ZleSZ+t228vN05bW2A7iz2t7orSuXQKuZ6cc5XVpV1mcwQwZIYAa+nJJ4HH3YTAqnHeAPwL+DtwQlCao9IQ2EkbBRHrI/UpYZd72/P/PqVbPv2I9kz5j/DtBi0trtMZa329Jq9qUG2SnDj5fPheC854Kc8gQ+BX4whqLHZT2mHbF9Jbry0PN4OZANWIe1qem61NwPV+1df5K8k82gO6qkQvv2iyfmHhxEF5JzP3aPp0q+Dh/FZErGOjzAioqg61IfBcgxhrwfr7UwbhjcCfg9Ic9YbACzcDUSZjsWXZQ1Zviyt+q7tO+x/tb5yQUni5PYU8FXadi7K57bv+H3ceyiCjJOtFk+fWqtZJVloJxNM4ONMp5Jxd0doNVFhDYDcim1qmZtxFCcc5PwMXVsacfS+WLNGES28wr/Q7K9HLF03R19z2msx2ws0zS/IOZtVYRrmCD8twNgQ3AR+y7T8DTPRL0xiCPGlr00S6ex1oMuWayCwb2dqqfY4FZoK2eF2r7jzji7rr1Ks1ESus/cKurLPaHsa5u0wKNWLJ10U12ZRSXuKvHPNR9mGUuV96+cQt1iXkjOtrBOorBvz4U6dZpf90r52Qedn3D1SKXr5ois6+aaZesWiy7mzK713LSjP9zhqFXxBehmAweg0p8KCIPCYii1zOTwa22Pa3po5lISKLRGSDiGzYudN9WL/BhfZ2WLCAyJ7dA71qkkmr98fRR1uf165dVNx+G0yfDiLQ2mptqd/90VhOstHu3UQP7KXpd7cQiffmnNfU5ke6p4uz55IeftA1rgC9EsmaR8i+5XBVA5xZjTyVQPZbPaVQkKj3tNBeMrqhtv9u8ZMe6QXdlyA5/GRKptK3/0/HcfaogmzZM/eyqz/VS0eRLZth1Srrf0De6TScYWr6lMvu3E5vhVDXk2Ts/rh73Ggs673L/J4+3eqhpAq3306yrg598UV0wQKrV1FqY3KO2jCExc06lHIDJqf+j8daoP5kx/l7gbfY9tcDs/3SNDWC8Hj1akmXshJea9Da8Wio3nnGFwNLiIWUIAspgbueqxdVjzV4A9fmdaRbSCl8a4P7QMCgtocE5CwB6ozrVTsIU5tILxKU617K9fnnUwsJejZfXDhRL7t0auEunTC91CZNKtWnc0jCcOg1BFwLfMpxzLiGykWI5SczH3DYEZ7OdoiI/zKJSrh2g7SC8nKBHLXsrsA0/OKHUfBhjUwpwvWmBkaFkSuMog/Kc8AI5DZi91TWak9Fbc7xUhkBBculU4wPP49BjAZvhsQQAHVAg+33H4BTHGFOJbux+NGgdI0hCEmqgTjMlsSj8TaItjarG14IhRv2Q3bzYwctjVlo+0FY+cIqRvs5rx5FYZR9IW0PYc57pe92LMyUFWHue29VrKhpTRJ14bonZ+QxbQeeeBmCcrcRTAB+LyJPAI8C96nqr0RksYgsToVZCzyH1X30e8DSMst0SLOj4yDXX3AtfVOmkdy9m0TIla8EiHR3owsWkKyvDz953fz5yK23WO0LPmmn/2sIOZy/FRjXvdczjhLsu/aKl45r9597kV5FzG81MfuZSMDV5ruOgV9q+bQ92K9VHcfteYWaOjoo3+nTqfz+bflPLd3eTrK+Hl2wgIhtWUwvdjbFOO/Kw9ndFIOLL84vLwOuJe7hvpkagTc/+fiXtdvRiyfXPywZP7HX75frW/Tsj9yg+25Zo/GpU8NNXudROwgqjXqV7Jw+9F4PH3ahNYFCStzO6/DqypneeqNRvfXcc7Wjri6vfMKU2PO5F2FqO2FrAV7yZT2ryqgmmq1aXKK5OTMRnefvKVO056ab9A9nnqaJigqNi+19FZd32HFsT300e6yCwRXM4vWHNkdfcz+98SS/X3U+UzqK71V1zbuW0FlZw9fvv4HKpEcvj9QGgkp6/herjJi9JoG93DhwPkwp3l5qT+MsuZZyziC/9HorKqjqz10DWG3/vVZsU+BABVx8xRH0R6EiDtffuDmnB02S7LmRxOV/IbK7nXP78sM+E7fw6vwtIOp+zP47Kdaa0wmBiIaXw0lPpXDteZNZe9IYKqOVPLbgsQJSObTxWrw+t1+gYUTyu8vfwfK1TzOpY1dR6Ry97C56Y9YEXL9fdb6nEYABF8P+1Mpejd1d2NXBgGLMURFZe0GLx+ASZiiKL5UuRsCO09w5/9f1Q9vy51CJIJrM6brp5ooJY+iS4GtYve6Vff2IfAyzlyF2TvbnzNh+zP47mjIM0SIfarqbqqB86tuPF5fYKMPMPnqIML6xmoaqGC83jg0O7MPvVn+E0zc+QkW8P
7RRqevpoam7K8vX3lVfz+2p5R/TCs9tg2ClLpDT1mHPq5QUkp5zYRsvv3lmNlRN5h6zxUnmxPTGaWLjLl+085rs6XsZIK98nOGc11qKcRLFcNjufmusQk1x38FowxiCQ4hdXb384YJl9FZWFZzG+O69NPQdIB6Jsq2h1Tfs9Pv/xJbxE4klc6da/s3JJ9O4f39mnVsVcVWYYRppAaKqWQOlnL/DUGw4p5xB6fmddyr/9DG3437pp8On48aSuWHspfb076TjeJh8vEjfl6RjfyjoqIuwe0zFEOU+cjGGYAjZuLuLay79DPFYLDM6MhGJwNLCOk7dtHA251x3BVW3fj/Ti6eQD3JX7RjmP74WBXpi3vO0Pzr/NKbs2JZ1bPlVV3Ht5z5Hb3U1773vPmp7elKlYM1SFk6y2xS8w9j95xHbcaeCdtY67HkUQ769c8LUdvLx1btdlxsvt1a4luLTv/OtBYS5johjf7CNQV9MGNOd5NvxUwtLYOlStCKWNVpZGyLEL/pQaQUdjri1IA/37VDpNfStCz+pB2MVnr043LYEaH9FRbgBYI50nXkkcExOF4u59lrxG2S0p6ExK92O+nq9ff583dvUFNhLxWspyLz6jLv8d27OHia+6YaYiyjf8QQlGZDluM4wPYjcel7le28LidtvG2RYaH5Z11hfb/1PDcLbVd3gn64XS5ZoMhb1/b5cZYtS+JKawwzMCmXDh8PW/Y3DHn6c/7rnR1TFcxsg/VwoESDW34+uWpVdckltSRHilZVWraK+3leOI674JUcv/qG1WlQyCf391pwujnhesgAkVOmLDfQ5aOjqonnvXpr27/fMd8BXnl1L0NSG7b8f9r7w9vDp/bT7I6J5tClocO2k1O0S6TTDXHM6TJAM9vfFHjdMLcArXJhaG5BxB6bj9FQ1ZqXtFt81vyVLLFXc2Wn9j8c5+qq1zLrsR/7yu3wXKmKtoBZP+H5fbkgC+N/VJL7QFHDlIxdjCIaA9a8/kmk9yuSdrxScRihD0dWV4yZJAvHIwGNPquOznD8fOjuRtrZQiqm1q5PKeHbPou66Onqqq0NfR9on7ryWeDQaOr7zGPi7P/yurS9aFUrZB/nNu2pqMr+9FKCbXOnn1OcycC2MMXN75mGNgFcaA2lJoNFynq/u7ciS3e9+KEB9vVUgWbky53xfIsnpGx/2zTtfRR8G3a/EL/ljESkMb8w4giHi7b96nB+e+x6m7ijcGBSD86nb+6/bCduY6BY2Ho0S81iz1xnfL58wSgzgQE0NtT09nuGTqW6bbrLmm186jF9Y+7l80iw0fpg0Cw3jDD/wWzxHUIe9R67nXazN4Zf/EkT451ffS+Ugt0BorSDd+fTnGp54jSMwNYIhYn8iyZ2n/zf9IUq95cBZUnJKkc9n5lXKi6aMQLGfbFi3SY2PEbBcRMlwJUNbp5NSqJu+WDR0g6tz389V40bS9r8cRiAt10ANVHNqDIWklZYlc9zhzjt62V0gwv3fW0LFYBuBGPSd0jyoeQ42ZkDZEPG3U0+AU0+Aih5YvRpS/vJy+J+daAi1ZFfuYWVy+6D9SrnOeGHTDnLJ2LFcGZrfffUfN5bB2Z7hlUdFvPhaUdj46ZJdmBJeqd43yfqtOW+WAJ1142no3uEZXxlwESqQjESIJJPsqGvmjUtv571P/4av3X8DVYn+QfpGUtcVjSIXLaLKxU11KGFcQ8OJpUszRsGLsB+vSsTqB+FC2A/J7YPOF1clWQnaF2wgguQpBYWWmAt1H2WVekOkFWRknHETIkRDfNODUejIx7Xldj7U+1cn0K/Q5/J+CFlTXPjllUU0CosWubZRjHSMa2gksHKl1XvHp6OdLFkCLo2IimRV0cXmBim0wcwZrxBF7HQbKUBfrmJ0pu/WUBnUEFjI5kaWrD7X5dnYaQvjTDOzL7mTb3jJEnTOLm+QEXDe73IWA515uD1Ty53kHT/wve1WiLuHT89ppICmE4hGB3ojeW3x+CFpBPwwhmCkkTYWbW1oS6tNSWrwR1MkbqXafOLa4zsVhD19e/hyX5MbpcjXrTSb5VtPzTWUxk0Zpt0lfjjvWZBMbvdYgYQtTPq/2zPKF7H999qKVkJJoEmspUld8s8yCokEetNqqzvp9Knhp1s/xDGGYKTQ3o5Om269wJEIumABYluHeDBxlnTzjeu2pUuGg+moDJuXVzjB6qlUjJK0K0OvGkaQSyVMrcJLwafliLic21cf5e43N2XCOEvyYcmpEXqEebm1iKkh9itc3xUohwCSTBWaNm8dWPd4lBuFshkCEZkqIg+LyCYR2Sgil7mEebuI7BeRx1PbZ8slz4ikvR1tHWu9qAsWpBYUB9E8Gz/LSL61BD9FMFyuKR9qenpyjhVairYrzPSMoGHi2KfdcHO1+Blfu9tNRbjhrFMy8cZ0JXj//+3PStOrphR0reL478bE3SFb6d2oxnIT5UnmetJGobp6VBqEctYI4sAyVZ2JtQTlJSIy0yXc71T1+NT2hTLKM7KYO7fspX7nZ1OMz7gYt5EzjcEibH75yuWmLMOWpJ01hKB2Crc8veK6legTFQNGQVT52J2/cm3/CJqbyKvNxBmmL1qVkaVkxID/KH6iOQGkt3dUGoSyGQJV3aaqf0397gSeBiaXK79DhvZ2tKICXb++JIrR2QBrxy19vwbUMAS5jYKuKYxCKSWlqMmkR0D7uXEKaXNwKvWkDHyuYUvg6bBp2ZxbtD9bvnR+EQqTOUi2ikSv6/F0DcivETlJrjFTQOOgj/Z7Nkbn69KyG4RCJ4AcaQxKG4GIzABeD/zZ5fSbROQJEblfRI7zSWORiGwQkQ07dxa/AtewZOlSqxYQj5ekkbIvmt/jDSrxueXhlbc9jlvJ1I9iaiZhsStH+7FCiCUSgb58t/zD4tbAHNZtZDcAXmGSEXh47YycePnIaL/+Qt5d+0yydnntCtxpnArdwiKArlo1KmoGZR9HICL1wG+AFap6l+NcI5BU1S4RmQdcr6pHBqV5SI4jaG+3jEABUZ1K6NtnnUIiVUr95E/vA6wF1/26FhaiyAbLlROmJhMGu9+9lLIHKWe38xm/fAGyhI2TT7i+5ihVe3MHvinQUwG1Hu57rzzSx5MiqSVMc887iynOdpFyvl/7GhsZ09ERKmwiBtH+waynlo8hGUcgIhXAnUC70wgAqGqHqnalfq8FKkRkVC4tlDzvwtAvvluvDzsX3vcwx7z4Esc+vyVzPpKnEUhK9rgEt+p1MdXwQkrEEKwc/Hzj9hJlIRTTfuLl8iqkbSVMiT0vAyO4GoF0XtUhjYD9nUmfj2h2t2a3doy0G6gYd1S+NKWMQJj7HolzyLuIytlrSIDvA0+r6rc8whyWCoeInJiSZ3e5ZBq2tLcj8YO+QTIvbGurNTOjD/UHe6nsj3PKX/7u22CZiESyPtqsarjHB1yqarik/tjz9yMpEd/07LKnS6JO3/hQ4Ne24BUurB+7P1bjHl/CX68Cu2ZVBxrzvXURZt08M6cAYr/Pbu9M0DU4DcNgTesm06eTGaCJ/z0XILlq1aDINVSU
s0bwZmAh8E5b99B5IrJYRBanwvwX8JSIPAHcAHxQR+KcF0WSuHBpoJKTmTNB1Vo7YP78wI/sQHVlTjXbvinWvPHF+lGLYlb2ZGxuDz6JpJSMt4pw+qedSilfSt1GYXcNedWWnM8qTA2rIt6TFScnwwB50vHGbTjoa8wjwJjuJBsWbcrEH1jrofD77GSwDIICrFhh7axcaRmEOXMCjcGhjJlraBiQXs/X9Rwgc+bAunXZJ447Dt20KdCAgL/vumwvuIsvJKwrQwg/WZzd156OWwh+bhs3Gd3iet5n2/TXbum4jRkI+8zs+2HaHUr1zMv67tjycGtLcF4zecji+T2B1Vlj1SrvNo8RqCudmLmGRihSX+/+0m7ciEya5BrH7vLxTZv8Sr5h/f+ZwC75efnLvfz3YdogijEC9rhutaaS4FKbyXLDueTthVMue+0Bx38v41aKGk9J749PHs4R12kjsG3CBF4ZOzYTLojMc16yxP17glE3v5AdMw31MEYBWb3aO8BLL1ld2849l4QqkdQSgfalAoM+1qASpDOsUz6vc85wlj+ZzGyQfmkBnoud+KWfT3i//XwJUopushVjtJy/803PWZouVJ6w745fLSZsPl01NbTPnw/Am/7wB167aVPmXFDeVkCBH/7QWoHPkINxDQ0DvFxDgdXRVFUWiqum+7k88vnY7fTGotx18olMfWUXb3jm31QkstexLRU90Rg1iXhwQJuMfu4dO/kaF797GCasnyz5ymPHa/U5tzzzzaNQJZ/Pu5UObyevWoAzjSlTkK98xdUoFPwtjhCMawjo7OxkzZo1dHZ2Qns7yWnTrAXfp00bOYNG2ttJTJ8+sBg3+StprzB+bpcgnK6NqniCD/36j7xl07MZI1COhujqEEbAfj1e+YdtMPdT0oWoCbf77tVoWyhha3uFXINb7SKfeGFdVfbwYe+FM2zmXm7dii5cOOK6hHbt3cNPrr2S7n17S572qDIEjzzyCJs3b+bZz38eFi0ismWL5YfcsgVduHDAKCxdOmAkWltJtrYOD4OxdCm6cCHRzZvz/iDAX4klatxTK1QJ2eOFTcM5hYAfYcPkk78znhulMmR+Sh98plNw2UrZwyYfY+A0XpD//emsr+eWCy7gl/Pm0R+NBt77hMtaHGHImYhPFV21Cq2vHz6FwPZ2klOnWrqmpWVA70y1Zkb9050/Yus/NvLHn/1vybMeFa6h5cuXE48PlBw//u1vM2b//oLyVhFQRVtaAJA9e9CpU4l8+csF+x8Dq6Pt7SQXL0a6usriAgo6F+Z8qeK4peGWZr5+6Xzl8apBBNUsglwlTpeI1yyjhdxr5759VtJCn10x70wYrr3mGohGWfCDH/Cq558vW55BadLSgtxwg+fo/qJcQ+3tJK+80qqJNDeDiKU3HL8j3d3Q1+eaRH80yoOzX8s/pg9M1xatqODjbXfnJYqXa2hUGILOzk4eeOABNm3aRDKZ5HOf/3zJXRRUVpKsr7ceap5Gws8QaEUF0l/+dVoLafQb7mHtcdIUGrfUDaNpA1DO5+p23aW414W2J9hZftVVxCusGUPfc999nLhhQ6j7VgpD4NUeFNSG5GsI7Mp+yhQi730vyV/+Etm61VL2JdCz+2truOW0dxKrrOLVJ76Jty/8CHVjmvNKY1S3ETQ0NFBVVYWqEovF2N/UVPpM+vqI7NljuZr27Bn4vWULXHBBtnvJ7nqaNs032UiJjYDf6xjWT+sX1ukqKFSWYrCnm3RIUogv264k8mk7SYf5x5FHZfbTW9C0zqXAzdDk49N3cxPZa2QKzLj8l3nJlLl/NhfP7Mcey5Iz7FoM+RLkjrNfl+v9sbtupk61vuPUvi5cSGTrVuub37oVXbUqs1+qRubGAz1EKyqI9/dRVVOTtxHwY1QYAoDu7m5mzZrFhRdeyHMXXkh/RfHzl4fGbiS2bIFVq7LaJ5yk/cOFlnwKwfkRBPng3Vwm+bht3NIoBXaXhqVw3ac9KNQg5ONDT9PQ2ZGzsMtg4qbcwt4HLyOYiFiq4zPrv+cb35l+Ot+PX389zbt3g2pmHqzMaOUAeZxpB3UXcCr/IFk9n/MFF4RW9uV4xgdbmvnv5d/kde96D9379pU07VHhGnKlvR2uvhpefDF0dX4wKLbq3nF4jIbnC5/G2i0PQsg0lPfMLkOacrqaguL1VlRQ1d9flEuqHPj5/Qvxyzs1RzISyYxlseOV7rc/+lGmbt3K++++u+B2H7/j+VJsu1I5iVdUEFuzpuhxEKO6jSCQtFHYvBmmTYN582DtWms/5e9n9+6yvhT5lpLDfBSl/HBKUYr3688e5MMtNW6Gv1RtFWGeQVhK3XDqVyvwywchMxgwbPphKYUhLve3ORjGIBGJEG1uhj17BvTOnj2WTlqxoiSD4bwMgRlZDNYNDnGTxW4w7A+qpYXEvn1EE+5T+YYlrF/drfTkdtzrQ8lX6dnTKlT5COGNwGB8dG6+b7fjhZJvY3Ah112IkReyVwPzSicnn5A3ZrCUsVvbRzHvjl/cQtL2NFStrdYBF2UfLZGyL4RR00ZQEubPhxdegGTSmgV0167M7+jtt8P06SBiPezW1szv9CIxabwa4PxI+3gPTjuBRF1L3vGd/v9CfN3OdMIQVrawYUuFs92gkM3rHgigEf9ps9OUur0kTK3KeR1+5Lp/ChSsCOxGOp93L9/0/XB73p7706dbcxql9cH06dbU8enZg216I/P7hReGdPoLYwhKRVgjYX9JCG8E0h9t9ea/0td6OH3jjshLifg15BbScBqGoGuLRwYa7IeLL7ZUuPnK7RRrAIIGkQUZqrA4w0aSKWU8CA/L2XjrNMKFGIRi7knWPfVT9i+8YE1gl9YHQ6zkw2DaCIaKEEtT+vn4NVrJ3lM+zZhHVhHt3uObVZA/tdT+VvtHWozSGU6oi2oNa8Ddjjv3/dxmbuRj/AslKH6iApIVQsWB0uqQQq/Z7T0OEz9f16TCiJ13aFSPIxiO7F56SUFGgNTxSKKPlvtWcODIk31LR27peO3b09GAzU/ujIyEd7MMRxT7Kmf5d0MVrBGhA+kNHLeHcS7bGHSfw96vx4+YWrAbJUy8aD/E6yMlcdfY4/u54tzycTOsbu99WBdQ2LCHEmU3BCJyiog8IyL/EpErXc5XichPUuf/LCIzyi3TcKClY7/nubAvoQD1j/+cPaf9Dy8te2ggbjQKra0cjHqPlVBy57PJxz/ulWaplXuQQcp3C87PXfl7EXSdsVQHAqdicqZbyL0Pup6Xx7YEhCCru4ibMg6iemeCX68/gkfWzggR2puwrpl83KjpOH0tUbaeXp+fPJMm+d/f4TI/UYko9+L1UeC7wHuAmcCHRGSmI9hHgL2q+mrgOuCr5ZRpJCDV1aEVqQAt936Rmmd+k2lE1kQC3b2bqoS16riXQizlYuHFuiH8aj+lrFH4GYWwyt8tXjnPB8Vxxt/e1MA3zz6VUx59Ivg6bKOxvMImfcZeHhwbZcK6Tk6anzswspw4jZbzXPp8b0uUZxc1M/mekPN0iUBbm7XWh1cQIP7hD+c
j7rCn3DWCE4F/qepzqtoH/Bh4nyPM+4DbU79/BsxJL2g/GlGAW26B6urQcSxjsJwDR57MS8seGnRXzHBo6M2nRC1AIgJXLJrCa9ccBxR3DWm3hd95GJhqYvW73sw3z57HzXNPKjDH7HS7q6v433eexA/mvJnb3vUWXhrbzCfvuK9kH3fEKk/kXGOiSvj3R5qJdSep2ltc1+lC8XIpAfx67QyevbiFo76zK9yzjUazFq+RdFdPt6ABnQFGGuUeRzAZsBcVtgJv8AqjqnER2Q+0ArvsgURkEbAIYFrA/DzDnqB50NM9DBYsCJ1k2k0EsPXTjzBx5ZmBjcijmVgSPrtmK1+5eWtJGlVDhUuVb7rHNAHQ2TyG6854N81dB4jaGh8TEWF/XS1NqePO/XSeAhyxbTuz//k8H/r1H7LyKodhtvvP4w0R4tVCrFvpax384Uji+O/k4bUzGP+7bmZ+eWeW3G7hBaCyEm69Nbt3z/XXB3boOFQoa68hEfkv4BRVvTC1vxB4g6peagvzVCrM1tT+v1NhdrmlCYdAr6FIxOpm5kJWj4QQC9S7xdeKGvb+v2XEx0xi7N3XEO3e46qswqZrX2LSi7C+2+GCW8Nt2Hhurogw1/+ts+dZrgfgEz+9L1TeYdIejPtq70Tgl59SfjeDlwz9dcLjX55Izct9zPjBXupetmopQfLKmDGwd6/7+UNsxbKhGln8EjDVtj8ldcwtzFYRiQFNwO4yyzW0+LxAexvHkGni27gRaW5G9+3Lq81A+ntouXc5/S0zkAPuRgDCl2QjI+9996QQA2CP49VY65ZuWmGlz1Ud6KG3toZP3LE2tItuuBjPoBK4PZxbLx772gjlQIF/faSZw9Z1cNgDXcR6B+RxC5uWUyoqPI3AaKLcxvsvwJEicriIVAIfBO5xhLkHODf1+7+AX+sQDG7o7d3Bc8vfRHLaJGta2WjE+p/P1tpSVG8CBe45+Z3ZaaZmGfS7IV6NwRV7XiCi+fvPC2lPCHpg6Q8vH89qKV8CZ8+ofOKAfzuAm5J0Czv3ny9lwg0XBV8OnO+PvXtsse9ienN734+5YQ9T7hkwAriEs9dsALS/3/ebdqYR+v21TVmtkZC6JBq1/sdiqAjdTY0c/N7NYXMsDlUt6wbMA/4J/Bu4OnXsC8Dpqd/VwB3Av4BHgSOC0pw1a5aWmq1fO0PjVaJqldcL3pIhN6/4uxqadNr9f/JMu1j5yrmFlS/sPQq6V2FkKSSNYvP2SvPpI4/Sb5w9b9g/x5GyDdV9LOd769zilRWqbW0l03PABtVcnVr2Vh5VXQusdRz7rO33QeDscsvhxcOPzCSZ7OWkb2wm2qtFp1dsSa+lc7/n8P1MdTa1b/89HHDK5xdusGTJN79C7mk+cZbdsbZs7pGimCaweVhKNuwYzG8u2tfP/kUXseb+n+W9LGU+jPqRxSe96REmTDid6p1By1sMHn6f43BS/G4EdaUcbPJ1bRVyf/O95mH1DK9qgM81wpbh9NQMdhoP9HDRjbeWNY9RbwiqqsYTi9ZzcNzwmZH78yu/yfT7/zTUYuSgDIxGTu+7qQ97jWUkUGgtwHl9XvcjKI0hvU83dMGL8VBCDLfnOawMahk52NJc0mUp3Ri1hmDngZ2c96vz6Fizmt57fsie11eRqBj6V0uA8+65g3m/Wx9oDMoprbps9jzVEdaNoa4dBHUbLEQR28MrcKCiMpNXIc/j8Mt/yf9Ne61/oHI+6C6FHYnAPPyMf7mfsVv6w80olYv+SISNrY0kp03n4C23c/c3H6N7f29wxDwZPsXgQWb131cz4e71VN/+v7yut7+otErtq48A3/3SNVz6GWVHcyvj93r3pvXKuxQyaUUN9PeEVnJBtYNS3qOwbSXOc+l9+7F8DJYzXk1/X1baftgXbE8TPdjDWzY/6R7hsw3WuIPPd4SUrjC0LgKzorAh4fkuBR2zFxTCPOugZ4btvFu4sM/bLd0w6eSbdjniAVQkk8x+9kXr9yWLqXvrJ9gwsY63/fcxBabozqgzBLPaZtGXsD7eB+7cTmWRRgDKU2CLoHz3y9ew5vT/4rT//Avj43shoQN98VJfnHyxsywypccjBL3E9o/LL1yhhsQrvlMhp+P7hfOTo9D7JUA8EuFV9/2BF97zRv97oLlXGI1FXUIOLnJHj//5fNMrIIzz2XVVVVPfezAnrN/7mPQ5p8DSzyznvre9i76KCiSZ5IyHH+CqW77D5J3b2VvfCALNHftJRiJEk0n2NDSBCM2d+9lb30hlfx/1B/O/V0UXyFJpRPsO8sY/f58fvnouT/32ZaKxCItvfHuRqVuMOkPwqzN/xTc2fIMHXniAw3YXbwQCqQBioD0gNaljPUBN6r8PlpvoZ1zymhX8fM4pPPHHsxi/dSf0xaBGoWMQ5HchrPIPE99OKYxXOv1B7dmRTPKX/z41MJy9RqBAP8I/v3WWd4TtPTChxvv8IYoADSkjAOFdUUF+7sdeN4v7L1nI0v/5Cs9MncHat87h7jnvAaCyv5++iorU7z76KiqJJJMko1GrI6eItcgMZM0M8P7193PVLTcyeecrHKiuoabngKscpXofG7p2Eq2I8KrXj+Oks15dolRHoSEYVzuOuoo6kprkldYKJpXbGPRbm0C24g8wAmnSbiKA/3jnncz651Os+eYXGf/cv601TysT0Nfnn0gRBJXc8qUkLquANMK6J0pFUoTxe3cHD6qz1QgEqEzF8Ix3Uxz5bP7XMty6FRdLKa5FgEc/cAp84BmOePJ53lQZY8kDv6Bi+XIO27mDZETorKmjqbuLZEToqqmlqbuLRCRCJJlkb2MTKDR3dbCvoRHVVFdvBgxQfc+BEkjqLnuarvrxJOJJKquj1DVVlSyPUWcIAPYc3MM5R58DX6rm4EevoLpveM8kGEFZ+aWrWfmlq9nTOIb/WfJJXj5sIjd/4UrGl9EIpLF5oobcCBAyjbDuhGJR4OFZb2Tuhj8Gh3W0ETjbK5wGwc9H7ifPkzNP5w9vWszi78/LI+bQUu6OD1JZaZXkW1q4NZnMTCuRzjeSVJq7OzO/x3R3ARBL1QJabeuH2NcSKbfBjYsQSxUg4rEq9EsreM3YyRwodYOx2yiz4b6VdGRxW5vq9OmqIqqtrdYmolpXNySjFsNsSdDvn362Tlj3mM77zm36SnNr2UdZFpv+cBxNW4xMbiOX/dJLgm466uii8w6KlwR98B2f0a6a5pLfq1KPth7O78aw2erqLH00fXpJRhjjMbJ41HYfzeC16HxXV/YjaWsDn/nJBxMBzr/nDv713rcybdtLvO6OB9l0+JGccd332NE8PGQEsrocDjdXhV0m9QtoC2PfStGeESZft3h+qESo7O+mrmdvAak70iL7egvtIht0neV8N4Z3Xd+FujpL16T1TleXpY9eeCF7iuwSYwxBWObPt4yE0zhMn26dT69NW8SaOvbBWkEIlk9y5ZeuZtvcWTR2dzFxx3beddP/sjFlFDYOgXFIK460jKVKM8yxMOk4ZQuj1NNhJq7/K2tOP7vg/J2kjYFiKfAgkuLfw0
iBl8fP5EBNiCUq3dInvPIPun77+aEoBKTzH3YKrq7OKlCKWLrDrvTTir+MCt8Tt2rCcN/KMelcSfFyN6V/gyYikZJPUpVOZ/P4ibr4M8v15Fvu0AnrHtP/+OkD+tThR+r7rvuebm9uDYzvlCWMbIXEySe9oUjHeS8mrP+rJmz7PdHweTldQ2HufxIp+h1JOOKXapK0JLlyl/oZlmsit5Jvkpqs0vZ9azSa/b9Erp1iwcM1lHNgJGzD3hB4cOCWW7S3qmpQXuj0h7OrcYwuvmqFTvj133TC+r/qpZd/Xt/zndt03nduyxiHJw8/MnPsleZWTYi4fnzFKPtCjEmhYezhilEi9jQmrP+rLr5qhef9COO733T0Mfr0kUeV7H65Pe8klvJPFJBGIfemFLLbt67aFv3JmSv1oZOX6YGqxmFnAJKkCnHNzSX13Q8WXoagrCuUlYsRuUJZezuJ884jGh/8ye3STzgpQkSVRGrAzPaWsXzh4o+z8VVH88zhrwLg7Ad+yZaJk1hxw9e4+mOXZ/2fum0rDQd7iho1GorZUXhRYWcyJ72gtJ3ne8ZW8tWlV/LaJ5/kzLvvzmsSOgG+mVpV7OsXf5HNc2dTqZZMB6NQ7bJMr5d8iuV6+edRR3PsP58JKUV+sg4nwsjUXdvC/e++lkRkoPPihO1Pc9Kfb6EiHvyeDRUK7DvzTJrvvHOoRckbrxXKjCEYBJYvX86l3/gGY/bvDw48yDiNxNbxE/nShZdy9ztPQYCjXniOf844gqNeeI5nZhzBhD27aL/iUo57/lnfUZzvv+57rLjha3z6k1ez5Ce3c9rvH87vw54dQ99Ty/ljPsety5aF8vW6KZ9rP5uZ8ZzPfeELeY1w/tbZ8/jWRdeSiFXw6/PP4tgXnws0SH4KUIF/HH0MqJbMGAyFEQiTpzNMd20L952ynL1NU2nseJmIJuiuG8uUrX/jrX/4LtW9A9NoDFcDYKcvFuPfr3o1UlXFMU88PtTihGZQDYGIfB14L9CHtSDN+aq6zyXcC0AnkADibgK6MdIMQWdnJ/WNjcOiD37YvCC3BpExEnPnse2dr/dVeLvGNPNf37g5U9P4xaXncuLTT+Z3DWNBlzay9yZo3t7hq4TD3p8wNYp4Fdxw+kBN4EvXf5nz77kjcGxCmLT/cdTRVO/fx+Hbt4eQ1p/BfifyHc/w0zNXZva768bSU9PCkc+uH1aKv9B7qMDLn/oUEy6/nNi4cSWWqnx4GYIcX1EpNuDdQCz1+6vAVz3CvQCMzTf9kdhG0D1uXP7+13QjlNMPmW6MHiIfaTzVhhAULgkaJ9t3nbe/93CxtnzijAsnm/13ejtYj37j7Hn6jXNO1UkPPqoTfv23rAbinGeU2tzCuMVxNhiXazxBKZ93vnkmQbsr6vTGRev0gXd8JsvXP2z8/XV11ndk+5byka03FtOXP3ftEGmTwmEwxxGo6oOqmnaG/wlr0fpRzePnnEM8mtv9T50H6uqQdJeyZOrVdPYhTo99UM3uwlpE19WwCBBVDSxFpc9HGeiCGKGA0tfzam35sDM4yFZbKc7eTbKqayBMIhrLnA8ijKvErU970FgGr+PlHonrzEcdefrJmwRq+rtZevNc3vXwl6np7ShqHEJJSH8b6S6b6W6atm9JPMYKOa81GY3SffrpxHftKrvYg8VgdLO9ALjf45wCD4rIYyKyyC8REVkkIhtEZMPOnSG+9GHGSTfeSOz227NftNbWAaWf3vLtR2w3CsnkoA9881PR6X7yw3FQz1SXd+jbZ53CN885NZRBtYdISiSUgks3FLvdM6/4g6k403IVY/iEbOM/5P7+1lbrm/AqVNlxGyuUNhDTp2f6/kduv53mO+9k6o3fGdRLKScFtxGIyDrgMJdTV6vqL1JhrgZmA2eqS0YiMllVXxKR8cBDwEdV9bdBeY+0NoIho70dLrsMdg+sZ+As2Q0GmtrKVuoYR6haQBr7PbD/7qqu4va5b+ZgTbVv+4AznaB7mr7+ZxyGYMiVJOHeh+Ekry+trXD99UMzIGuE4NVGUPC3qapzVfU1LlvaCJwHnAbMdzMCqTReSv3fAdwNnFioPAYXXEo4Q/Exp0uGB2NlmuMwzwpi+h58+6xTso7XH+zlqJd3AAQaAXs6YdxkEcjpKeT8KAorkgWjHr+hNG6vISVd4le13nVjBAqiLIU0ETkFuBw4XVVd52YVkToRaUj/xmpgfqoc8hhs+DWBldGtJEBVPF5yYxBWeTrDffusU0hEoznTehyoruTri5eDCOf5GIFC5QB3l4k6/odNN0y+4vidrqHkw3PTppXNUHni9Ou7vbNG+ZeEctXWbwQagIdE5HERWQ0gIpNEZG0qzATg9yLyBPAocJ+q/qpM8hjCcNFFWW6kUpM2Bklge0sr/dFotnJpyq/smY+byxkuEbFe/XQDdlqO9/3hr55xgtIPoyjdwiiwq6GO6888JSdPuwvLub3c0Mo/WqZmpRmmPaYQ3/2rNm/OM0aRTJoUzq9vKAnl6jX0alWdqqrHp7bFqeMvq+q81O/nVPV1qe04VV1RDlkMIamthZ6Qq+X4EKQM026S8Xt2E0skMnEU0P3hy5zFtHUENdZ+9ZtftBRQnoSZnM1ZOk8r7h+++2QOf2WHa7oK3HvUScy+5Adcduon2VvTAMDEzt0cs2dLlhGK2OKEwW+cu7Pm8Ojs2aFqKCWpObz8MixdWoqUDCEYdpPzGYaIIo2AAv1ReHZSRWh3hdvmlXYcePqoo7NmxixUToCJ27Z7Kvtz194dKo2w54N6Vr04roXa3t6s2kg6ngK/m/Y63rzlSTZ898Ncf9+3aOnpzLlnzhpJ2BqK33ym6fTT6bxhwwb+PX164PUAJJHijcLNNxcT25AHxhAYCib9oXdXCVcumswJ3zuOs5YfxYr5h2XO541LE4IAGosx/YXnM/uFyptWbh/6/WM557IPKF2VVXl19SyUn7/1RBbf++uMHArsbmhiz2nXIMBJe7dlKX8v3IwBBBsip8sp6cjFHubVL74YsnHcyjUerSQuFYUZhYTLZE6GsjAql6o0FEf6o/7JO8bwpQ9PBqC6N8mcv3bw7Ru3ZMIVpDA9fBWxeJyKApKz1x6cXUZVxLNr5CtzZ2Wl4QxTqm64Alx2169QYE9DE9d89HLunjsPVNnwYBdbj3s3qDLla28LnZ5TtqAurs7riqD0R6IciFXS1NfjGiZsb6OKRB8K9FXU0Fk3ntZ9L+ak54nLAExDeTCGwGBRU+PrHkorwwRw9aLJrH3TmMyJmv4mHl38h7KKF4/FqMhj5lZ76d/tOMCnfnofWxvHMaUjuP/p9nHjOCw1CC1fI+Dn8kqfm/jrv6UOWr1hVv6lJ+O6qv7rz/PIzX2MRJjxDva4FckEndX1GUPgDJNvQ31Vfw+V+17kyWNPZ9K2JzIGwZ5nDot8x5gaSogxBAaLAweyGoydfc/tpf80lf1JNix6ml3Nh5ddvEKMQBh+deSb+Mhj9wSGz8cIuLlk0hP4ecroaK84cc/A9bauu876UQG8uQL+EofuYEeLU2Hno8AVm
NSxixlX3EtVvI9nvnlmTtr5IsBrn76HJ489nZ+c830A3nvv5Ux92aWn1pIlsHJlThqG8mAMgWGAAwNDPv7j9tcOHNfMH1CyXEBJhHF7ny8oO7vLxet3vumF9aOnw1z42D1Zcd3S7KmspLavLy8Z7P8BIqo5eaTPdVVWIckkmurSWunsAyoRtn7yAaZ8813wSL91rAZ4Q7BRcOuOGtYYbGto5X0bH+bqh78fGDYfd9Frn76HY559kEfe+nHu/3+fJ9bXw8HaZiKVUZbc+I4QqRhKjttMdMN9G4mzj45o5szxXMJy45HvyhnmE2YWx49++lrd3dCQCfvV8z+lvRIp62ycyYCwbksjOmcVLWYVtTDLO3rF23LFb3XLsodyz49Fk+MioeU5atldoa/hY6d+Uq9+15KCnkfez2wErfI1ksFj9lFTIzD4M3cuun69a2lPgJnPPuR6PIjrv35tVrhPr/lG0b2BnL+9wnjxzJFHZUrlaY555h9Z+/nI6JdnkLzZgSNEd29m/I8+lntuF8jhil7UgHyp0zOvdD690eAmd01tDX0H2FnXEhjeK40wLjQFtKLCdF8cYsz9N7jT3o7WN3gaATtpxRGGdNhS9MBJ5+nsIWPPxxkm6ZFXZ1U1GnIa76BrdfZScosf9n5ZEZIkWqex7dKfs3VZruHleUUqBP1sg69MCqxd81H/rBhwry1/aDU3/fxLBXUDtnc5DQoX6e9HFywwA8iGEGMIDLksXYouWIB0d4UyAkF+eb+wxRgBr0FT6XOd9eNdfeRu1PceRDQ7tSMdtQF7Gl4KTh3/vWQToC9a5REqm0TjBFAlunszE1d/wD1QZxJZ1e15P9JyH7vzBfZVuxsM+7Pwu0YCzqVJX2fYsLpqFcmGBmvWXMOgYgyBYYB0LWDVqlAGAMIrcTeFX4gRcNYovFxWAA1d7lM2OOmJxvjHUUdnuYWOeuYfmXn1w8hkz9vrupxGsCLRiwBbx7vN5j6Q9v6TF4GIf63gQBJ2DgwFc8pk//2tt/w3fZHcPvphxhnYj4WtKeRjDCJdXaZ2MAQYQ2CwyKMWAHko8NbWkgy+8lJsXmH8wjmpTmR3TX3VP/+Z12pqTkXn1UbhxeQdr/im3nPcu1KJJKnZ+GBWrUABZgjURByxYF9jo2uKn193E5XJcKN2/eTO1xgEpZeV7qpVxhgMIsYQGCwjEKIWkBciMGcOevCgd5AQyThrHjlx6uqgrY1ETVPuuRMriEcrA9N3xqtQ5yQL/rKFCVf4vbXlIhF6jns32xb/JLu77QsK13XlxGzq6Mg5BuE/+rCuHW1pgTlzyHdlt6BwumoVzJ0bMoahGIwhGO2Uwwi0tsI732k1NHd3+wYN8rVnFJHXfPRdlgKM9OzPTeTRfmKJ7P7/9gbMYmsq/7HmuCJiDyBYjdVuJOrHQrr0rgq9B5i4+gOW3Kl74HkPRYI7cS5Z4tuukJbPKw8BZM8edP16OPbYkq5nIYCuX49WV5t2gzJjDMFoptRGIL1a1DnnZPU2ChrglcbTr93W5jsffXzpMn93kc2I7O7ekdXO8PRRR/P00ceEKs06ad0xMP1CIffQHqe+173mtP9tF0Pany8CVbVWO8Gn1gXmLarBCnTlSmTJkrxk9TqvmzbBhAnWqOAC7qdXutLba9oNykzZDIGIXCsiL6UWpnlcROZ5hDtFRJ4RkX+JyJXlksfgoJRGIG0Adu2C//s/13TD9ECRJUvcS60Bi5JEO7Z7pitLlmTiz2qbxTc+cUJWo23T7t1WHjb6JRLK7fPIFc+5XocTIXfBmKTk+vR3N+T69JPVjUT3bIFkqh1DlWjXdiZ+9q0DcadP98z3QJj5elauzLnnduMQ1BPKnp9u2oTeeissXlz62oFpNygb5a4RXKcDi9OsdZ4UkSjwXeA9wEzgQyIys8wyGdrboRRGwG4A5s/3NS5eJX/q6qwSv2rhc8v4lT7taSpcc/uuLFkm7d6VOqcZg/DvI4/0VX55jwMgt0Qd0dy1xFo6c3364+68gsSYSRBJjf0UIVE/gW0r/szWa35vHVuxwlOemgMHClOeNuMgbW1QWRmqvSBTgl+1Cs45p6TLn2aMgXETlZyhdg2dCPxLrdXK+oAfA+8bYpkObY47DhYsKC4NEav6nzYA7e1ofX3obqdZJf+uruKWIVy6NKdE78XD8Ys8XTADAlppbTvsMN9uoG4fTrGG1autoOq5R1O1gmRGxmhlHxOvONHanz8f8VC2JVGe8+dDb++AUUj1BPMjk+///Z/1npTIIAjAwoVFp2PIptyG4FIR+buI3CoizS7nJwNbbPtbU8cM5eC442DTpryjKUC6j/306fDDHw6UtNvb4dxzke7u8EagVLNKtrf7Gh+ny6T/41e6hj32n88Qjcep7O1l0ssvU9nbS3dtnWe2xQ608sPNUI3N1ApSz0CERF9ldq3g+ut9XVP7LrmkSMlSzJ8Pu3Yhc+aENwZLl2bilcIgqKpZq6DEiIYsTblGFlkHuI2GuRr4E7AL69v4IjBRVS9wxP8v4BRVvTC1vxB4g6pe6pLXImARwLRp02a9+OKLBcs9aimkAU/E8vd6KO+kSKjSRMmNAJCoHUPUrbdQOj9bI/OstllsWPhX30Zlcfz2qhHkezzonJMkuSW0ned8lXjTFBLNk0EiVq1gbA3jF7+OaEOqi+zYsbB7t3f+RXzrrhx3HLppU2EFgPZ2uPpqKOY7rqiAkLPCGixE5DFVne08XlSNQFXnquprXLZfqOp2VU2oahL4HpYbyMlLwFTb/pTUMbe8blbV2ao6e9y4ccWIbQggoy6cpX877e1odXW4EcjpdoASzy/v2mU0hbS2ZrmcHu6/yDct5+jboSSCNdrZztifpmoFYqsV7D4YulZQFjZuzK9mYB8TMH8+vPCC5YoL6W7Kob/fNB6XiHL2Gppo230/8JRLsL8AR4rI4SJSCXwQuKdcMhn8USAZqxxovH3hhVz/fcoA6IIFSG+vr9LMlASLbQfIEwW4/vrM/qy2WciSTwSOSE7YvA2ddbnzFLnFKRfO0c4CVEUeI7pn60CbiCrR1uqstgJfytHIum6d9b7U1QWPQvYaE5B2NxXiNjIL3JeEolxDvgmL/BA4HuubeQG4WFW3icgk4BZVnZcKNw/4NhAFblXVFUFpz549Wzds2FAWuQ9pfNoIFOib+UaqNv6xoPhu6ZXaFZSTh4i3i8b2XnesWU3DBUvCuTBcfgeFDYubyyesPFSCVggvLf31wLgCOzFhyvK3eN4TgH1NTYzZty8PCfIkZJdkBWTOHFi3zj1Aezs9ixZRfeBAuHtcJh12KOLlGiqbISgnxhAUQcqv66Rz8VdpXHW5dzyfdQmcDIYRgHCGYFbbLB65cBMNHr2FlIGpqSOO437tAPaRz27nvfLK13j0RGPUEIfXROGJBDvP+irxsTNINB1mtd+oUv2asTSf8WqrraC+HjxGc6sIkszttlpSGhoyo739yNwLv8GCYQxLNAp5LGM62ilLG4FhBLJxI6Kas/kaAYCQRgBSH/gwWW/2d799XWCX0U2Hvzrr
2oLcQoNJdSIOx0XhKKtBeNydV1D1/J+tkyljF//LpkyDca9P46lMm1ZeYQFWrw49sRxgdWX2clmtXIm0tZEQ8U7TLHBfEkyNwBCOfHocTZ9utS+UmaAawdVLZrB89YuBSt3psglTcrdPU+EcLFfKGoFbnK2fftjTPTR5xVu970nAVB0lo73dakMKG766Gnp6/MPMnQvr12cfMwvc541xDRmKI6whqKyEW28dFIUTZAg6a2ppOOitYLy6iYbpHlqIa8iPfLqkJupa2feOpfTMnGs9l0Scqn88TMv/XkOksSpUu0nZmTEjdNfQQZdtFGNcQ4bimDPH97SC5Z8eJCMQxB+Pq6fexwiAu+Itl1uoUDXnNq3DtsU/ttYoSBvnaIze497Fti/+PnA210FjxYqc9Z8NwxfzpAzhWLfO2xi0tlpuh87OYWEEAN64yX+ks10xZs0C2u29qlmYifQ8Rzn7yBKE0xhMXP1BIh3bc6anPmzl2WXJvyDmz0d+8AOorw81zsAwtBhDYAjPunXus4Om5xwaJoTt+uk1oZxXHCdB4xKC0gyTlv18PFXCjnbvJlk/Nmd66lcu/TkvuS1jOVTMnw+dnVnzE7nem4DapqH8GENgGLH4TbTmRZCRKEfpVICeSvcF4+0EGYyoretnZiK69CymLstYDhvSA8ZUrfEDNnzHExgGDWMIDCMXlykV0mMC3AgaMNZT2RDateI3JbPb8eq+To/Q4dJ0ytA74wQSLVNtU04MLGM5rHHWKo0RGBYYQ2AYucyfnzPXjV+Jvt/W41Lq6yGSvQCNl7L2cvUU44/3mynUy2UVpp3Aq0Ygdd6zqRoMxhAYRjbr1iEzZ6JYK3z51QYqEwO/Wb0aEgkkz54thXYPzQdnV1awRmqLbX1hr3aCbZf+nK2OdoIkwE03lVdow4jGGALDyGfjRmTmTNcVvtIoMOvmmQPz3KQbtxOJgRkwPePmp/q9GpY768Zn9vOa6CG9etvKlVk1oIF2gvQkdC7tBCJEBmsgmWHEYgyB4dDgqqs8Tynwp2Nr+NWn/4nMnOnql+5cvMzH1ZOr2jtqvKfg9jpu75qa9eEFdbG0T8Gwbl3GGAy0E6RydLQTKFjTiBsjYAjAGALDIUHnhRf5ltuXfOoIxs1+G2zc6Hq+vmu7Z1y3dBt6Apa8dEHF43Nbvdpqs/DIu2+pY2nGlDvssHQ7QbrnUCJOZP92Jq7+gFXzmTTJGAFDKIwhMIx82tt9RxGrwEnRV/lOe+wZF6yVsFzS9AzvgdgWrM+ESy+g4zNZW0WHy5mNG4m98Xiq//1HwJqFlEiUmn//gWj3HqvR+eWXzULvhlAYQ2AY8fScd75vI/FP3t7Mo9Gt3vE/stS/FWDNmpw1csVH43uV/O3HBSyXTnoBnQIWldn6xms58PozrHRSW/cJ7880Fgtw8Pzz/dM1GDCGwDDSWbqU6ni/b5DvLzmBB856wP1kezvVvd6NzDJ9uqWkb789lDgqENEk8Whl1vF4tJKIOpqIVbMMgN8AuRz3EDDx8hOpaeqGeF8mvejuzVmNxVVmOUdDCMpiCETkJyLyeGp7QUQe9wj3gog8mQpnphM15EeIhUv21Ud529S3MbZmrOt5v9qAAqxILZg3f37GdePbUJwK9NSxp3GwqgEFDlY18NSxp+UGnj49e99nzWE391C0sZKejjqIpYyOCInWaVldSDPrBRsXkcGHshgCVf2Aqh6vqscDdwJ3+QR/RypsztSoBoMn7e0kA4yAAl/574n89JmfMqttlmsYr9pAZpU1F5dNUEOxAq976i6qejvpmXEi1b2dHP+Uyycwb172fgHuoaojm4lW9vt2IRWg78Mf9k/bMKopq2tIRAQ4B/hROfMxjD56zjvf9+VV4I/H1vLrtx7GqYef6u4aCiolOxY9SZfJ/RqK7QvVCFDzwqOuYQXYetfPc4/n6R7qfW4fib4Kzy6kaSqSSWtxF4PBhXK3EbwV2K6qz3qcV+BBEXlMRHzXnBORRSKyQUQ27Ny5s+SCGkYQc+cGtgv0xoSLrziCg4mD3P/C/a6uIT+3kJtCzij4PCb396uxTN6+LfdgkHvIYbwmXn4ikaZKMmYqmcx0IXXKoevXGxeRwZWCDYGIrBORp1y299mCfQj/2sBbVPUE4D3AJSJysldAVb1ZVWer6uxx48YVKrZhpNPejgasn9wvwucumEyECNMapnHSpJNcw/m5hTK9ebzOl+C46+ji+fN9awXxCxdkHdv2tUdJ7u8jY3IiEZJNE1wnnxMgvmBBznGDoWBDoKpzVfU1LtsvAEQkBpwJeE6HqKovpf7vAO4GTixUHsPowK+rKFjK9ZqLJrP2pDEkSbK5czOPvuLingkqGTv99bbwYUYUpyeP86s8eH58PrWC6MFsWTI1gkzm6lojyMQH04vIkEM5XUNzgX+oqmsHbhGpE5GG9G/g3cBTZZTHMNJpb/d1CSnwlY/O5FcntQAQlSgTaie4tg/k6xbqW7pw8FbS8mk0FqBn2XmZ/WhjJTXHWNdLzBpLUNO7mUj3Hs/4yVWrSier4ZCgnIbggzjcQiIySUTWpnYnAL8XkSeAR4H7VPVXZZTHMMLZvfQS/7UG5swh/qEPoCiV0UqSmvTsOpqvW8jefTNvF5D4mBCPmomXewigens8az/R1U/dGyYyfunx1L1hIol3nBK8+I5pKzDYENU8Wr6GCbNnz9YNG8ywg9GGivgbAlU+/vDHGVszlrOPOps7/nkHu3p28e13fDs7cHs7umCBa1rpdLzy9luLwLnwjR0vuXc3NtG6f1/uiSAZg2YUDRhjcbCiguq+Pu/4hkMSEXnMrau+MQSGkYGPYgSgvh46g1cBA+ipbqLGazRxa6u1BrODfA2BtS+Zv154GR57nm70TIhR84p/zykqKiAedz3ll6/h0MXLEJgpJgwjgiC3EKtXh06r0N5CAD0VMf+2BdsyjJed9snstPNEnCOPbTjdQ67cdlv46a0NoxpjCAwjgpaO/Z7npL4+/HTL+fYWclDT713CdhqRK377gwFXkUc7gW/7wYoVxSnygEZnM9rYkMYYAsPwx2dEbL61gb5zL8yrt1BeOBTvxI4BF5OXGybi557Jo/eQZziPdQ4gNdrY1AoMGENgGO6EGECWT22gIuE+T1CQW6i/MToQziu+g5cbB3orJT3WRk5Om+aZJ5TAPeSzzoGpFRjSGENgGNYErTyWz+LzfstRAv4l8Bu+QzISbjBZhhVfGlhsPpmkp7Iq63RPVTXRL33JTyJf95CGufQAI2nmIDKAMQSG4czSpf4rjwFcfHHo5OxrBjsJcgtVnLuEg9EKb0Pi4uufcumFHKiozuz/4LQz2dvQiAJ7Gxr5wbVfDlTUD09e4S2z6xwVLuF83ENmDiIDGENgGK60tweuNSCxWM4MoX6IhxtGgXUnLA6MX9Pv3l1TAVnsHv8rbzuXvojlVrr4rh/R3NmBAGM6O7h4avCcWSe96RH6Jza4nvNzG2Xh4x4C4yIyGENgGKYEzSmkALfdll+iK1YQt5XQ0+lsefsHeNMd/5NfWk48DNI
X//YzKpOJnOMCJC4MnvOnqmo8Oz8xh0RV9t1IVscGFs0JYv58ZMkSX2NgXESjG2MIDMOPgOUn/RaN8WX+fGJrbuFg6yQUobNhAg/N+QzPX3QtdU1V/nELdZ34TCAXOei9RKad3f/vMF5Z/j6SUyeiAn0T64ncclt+179yJTJnjm/DsXERjV7MyGLDsCMp4l9CyWMUsRv3r/47tU1VHPfWSWz83csc2N/Lexb/h2+cjrpaGg94tFd4jEZO4zVCeEhG99bUwEHvFdbM1BOHNl4ji2NDIYzB4El7e/CUDHmMG3DDrvTf9qGjQ8Vp8DACCkjAaGRpbYXdu3OOJ2ua2HbN75my/C2hZCgJt9ziO1VHlUc7iOHQxriGDMMKv+6iBbuESoBOmeJ+vKUlWJ7rr0ejFdnxohUcuOBqJl4xyEtwzJ+PVFf7hzHuoVGHMQSGYYVfd1GBvHoJlZLIV75Cfyyadaw/FiVyww3BkefPR25fQ2Kc1TYRb5zAnnlXEp/zfqINlWWS2IdbbvFtK+i+6KLBlMYwDDCGwDB88CmJKsCSJYMmSg7z5/PE2e+np6UZFaGnpZknzn5/+NrJ/Pns/eZD7Lv7nySf/heRCz5MomuIfPEBMtf2eBtjw6FJUY3FInI2cC1wLHCiqm6wnfsM8BEgAXxMVXOWiRKRw4EfA63AY8BCVQ38Okxj8aFJZ00tDR41AjNtcokZO9a13QLMvT6UKdc01E9hrUv8W0dmM7FWKDsOOAVYKSLR3Oh8FbhOVV8N7MUyHIZRiq9byGd0rKEAfLq1AixfvnzQRDEMPUUZAlV9WlWfcTn1PuDHqtqrqs8D/8KxML2ICPBO4GepQ7cDZxQjj+HQJN8ZRg0hCHAPXXbZZYMkiGE4UK42gsnAFtv+1tQxO63APlWN+4TJICKLRGSDiGzYuXNnSYU1DBO8JpATGZKeQoc6XvMr9TU00NDgPq2F4dAk0BCIyDoRecple99gCJhGVW9W1dmqOnvcuOA5WgwjD7n44hx3hd88PoYiuf56ErHsoUTxaJTHzLxDo47AAWWqWsgEJC8BU237U1LH7OwGxohILFUrcAtjGE2sXMn63R28484fE0kkSEajPHzWB5k7RF1GD3nmzycKcPXVsHkzTJtGbMUKTjK1r1FHSaaYEJFHgE+lew2JyHHA/2K1C0wC1gNHqmrCEe8O4E5V/bGIrAb+rqqBX73pNWQwGAz5U5ZeQyLyfhHZCrwJuE9EHgBQ1Y3AT4FNwK+AS9JGQETWisikVBJXAJ8UkX9htRl8vxh5DAaDwZA/ZtI5g8FgGCWUaxyBwWAwGEY4xhAYDAbDKMcYAoPBYBjlGENgMBgMo5wR2VgsIjuBFx2HxwLey0QND4yMxTPc5YPhL+Nwlw+Gv4zDXT5wl3G6quaMyB2RhsANEdng1ho+nDAyFs9wlw+Gv4zDXT4Y/jIOd/kgPxmNa8hgMBhGOcYQGAwGwyjnUDIENw+1ACEwMhbPcJcPhr+Mw10+GP4yDnf5IA8ZD5k2AoPBYDAUxqFUIzAYDAZDARhDYDAYDKOcQ8oQiMjxIvInEXk8tZrZicGxBh8R+aiI/ENENorI14ZaHjdEZJmIqIiMHWpZnIjI11P37+8icreIjBlqmQBE5BQReUZE/iUiVw61PE5EZKqIPCwim1Lv3rBcj1JEoiLyNxG5d6hlcUNExojIz1Lv4NMi8qahlsmJiHwi9YyfEpEfiUi1X/hDyhAAXwM+r6rHA59N7Q8rROQdWGs6v05VjwO+McQi5SAiU4F3A5uHWhYPHgJeo6r/AfwT+MwQy4OIRIHvAu8BZgIfEpGZQytVDnFgmarOBN4IXDIMZQS4DHh6qIXw4XrgV6p6DPA6hpmsIjIZ+BgwW1VfA0SBD/rFOdQMgQKNqd9NwMtDKIsXS4CvqGovgKruGGJ53LgOuBxyVo4cFqjqg7a1rv+EtbrdUHMi8C9VfU5V+4AfYxn8YYOqblPVv6Z+d2IpMM91wocCEZkCnArcMtSyuCEiTcDJpNZOUdU+Vd03pEK5EwNqRCQG1BKgCw81Q/Bx4OsisgWrpD3kJUUXjgLeKiJ/FpHfiMh/DrVAdlJrUb+kqk8MtSwhuQC4f6iFwFKoW2z7WxlmStaOiMwAXg/8eYhFcfJtrEJIcojl8OJwYCewJuW+ukVE6oZaKDuq+hKW/tsMbAP2q+qDfnEC1ywebojIOuAwl1NXA3OAT6jqnSJyDpbVLmTN5aIIkDEGtGBVzf8T+KmIHKGD2I83QL6rsNxCQ4qfjKr6i1SYq7HcHe2DKdtIR0TqgTuBj6tqx1DLk0ZETgN2qOpjIvL2IRbHixhwAvBRVf2ziFwPXAn8z9CKNYCINGPVRg8H9gF3iMgCVW3zijPiDIGqeip2EfkBln8R4A6GqHoZIOMS4K6U4n9URJJYk0PtHGr5ROS1WC/PEyIClsvlryJyoqq+Mljygf89BBCR84DTgDmDaUR9eAmYatufkjo2rBCRCiwj0K6qdw21PA7eDJwuIvOAaqBRRNpUdcEQy2VnK7BVVdM1qZ9hGYLhxFzgeVXdCSAidwEnAZ6G4FBzDb0MvC31+53As0Moixc/B94BICJHAZUMk1kMVfVJVR2vqjNUdQbWS3/CYBuBIETkFCz3wemqemCo5UnxF+BIETlcRCqxGufuGWKZshDLun8feFpVvzXU8jhR1c+o6pTUu/dB4NfDzAiQ+ha2iMjRqUNzsNZmH05sBt4oIrWpZz6HgAbtEVcjCOAi4PpUA8lBYNEQy+PGrcCtIvIU0AecO0xKtCOJG4Eq4KFUzeVPqrp4KAVS1biIXAo8gNVL41ZV3TiUMrnwZmAh8KSIPJ46dpWqrh06kUYkHwXaUwb/OeD8IZYni5TL6mfAX7Fcp38jYLoJM8WEwWAwjHIONdeQwWAwGPLEGAKDwWAY5RhDYDAYDKMcYwgMBoNhlGMMgcFgMIxyjCEwGAyGUY4xBAaDwTDK+f/CKECOzgka/QAAAABJRU5ErkJggg==",
- "text/plain": [
- ""
- ]
- },
- "metadata": {
- "needs_background": "light"
- },
- "output_type": "display_data"
- }
- ],
+ "outputs": [],
"source": [
"'''\n",
"For HumanML3D Dataset\n",
@@ -701,7 +614,7 @@
},
{
"cell_type": "code",
- "execution_count": 42,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -711,40 +624,18 @@
},
{
"cell_type": "code",
- "execution_count": 43,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "0.0"
- ]
- },
- "execution_count": 43,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
+ "outputs": [],
"source": [
"abs(reference1 - reference1_1).sum()"
]
},
{
"cell_type": "code",
- "execution_count": 44,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "0.0"
- ]
- },
- "execution_count": 44,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
+ "outputs": [],
"source": [
"abs(reference2 - reference2_1).sum()"
]
diff --git a/dataset/humanml_opt.txt b/dataset/humanml_opt.txt
index 718bce2..0a1f286 100644
--- a/dataset/humanml_opt.txt
+++ b/dataset/humanml_opt.txt
@@ -25,6 +25,7 @@ gpu_id: 3
input_z: False
is_continue: True
is_train: True
+joints_num: 27
lambda_fake: 10
lambda_gan_l: 0.1
lambda_gan_mt: 0.1
diff --git a/model/mdm_unet.py b/model/mdm_unet.py
index 17dd151..8aa564c 100644
--- a/model/mdm_unet.py
+++ b/model/mdm_unet.py
@@ -638,6 +638,8 @@ def __init__(self,
added_channels = 263
elif self.dataset == 'amass':
added_channels = 764
+        elif self.dataset == "custom":
+            added_channels = 323 ## FIXME: derive from joints_num if possible: 12 * joints_num - 1
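+            # 323 matches NUM_RIG_FEATURES in data_loaders/custom_utils.py (12 * 27 joints - 1)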
else:
added_channels = 0
self.input_feats = 2 if xz_only else self.njoints * self.nfeats
@@ -740,7 +742,7 @@ def encode_text(self, raw_text):
# raw_text - list (batch_size length) of strings with input text prompts
device = next(self.parameters()).device
max_text_len = 20 if self.dataset in [
- 'humanml', 'kit'
+ 'humanml', 'kit', 'custom' ## FIXME: need to update custom info here
] else None # Specific hardcoding for humanml dataset
if max_text_len is not None:
default_context_length = 77
@@ -777,7 +779,7 @@ def forward(self, x, timesteps, y=None, obs_x0=None, obs_mask=None):
"""
assert (obs_x0 is None) == (obs_mask is None), 'with spatial-conditioning, both obs_x0 and obs_mask must be provided'
if self.keyframe_conditioned:
- assert self.dataset in ['humanml', 'amass']
+        assert self.dataset in ['humanml', 'amass', 'custom']
x = obs_x0 * obs_mask + x * (~obs_mask)
x = torch.cat([x, obs_mask], dim=1)
return self.forward_core(x, timesteps, y)
@@ -832,7 +834,12 @@ def forward_core(self, x, timesteps, y=None):
x = tmp
# just reshape the output nothing else
if self.keyframe_conditioned:
- njoints = 263 if self.dataset == 'humanml' else 764
+ if self.dataset == "humanml":
+ njoints = 263
+ elif self.dataset == "custom":
+ njoints = 323 # FIXME: set to (27 * 12 - 1) once input processing is corrected
+ else:
+ njoints = 764
x = x.reshape(nframes, bs, njoints, nfeats)
# NOTE: TODO: move the following to gaussian_diffusion.py
diff --git a/requirements.txt b/requirements.txt
index 951754e..9ee7256 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,26 +1,86 @@
-blobfile==2.0.2
+annotated-types==0.7.0
+beautifulsoup4==4.12.3
+blis==0.7.11
+catalogue==2.0.10
+certifi==2024.8.30
+charset-normalizer==3.3.2
chumpy==0.70
-einops==0.6.1
-ffmpeg==1.4
-gdown==4.7.1
-human-body-prior==0.8.5.0
-matplotlib==3.1.3
-numpy==1.21.5
-nvidia-cublas-cu11==11.10.3.66
-nvidia-cuda-nvrtc-cu11==11.7.99
-nvidia-cuda-runtime-cu11==11.7.99
-nvidia-cudnn-cu11==8.5.0.96
-Pillow==9.2.0
-scikit-learn==1.0.2
-scipy==1.7.3
-seaborn==0.12.2
+click==8.1.7
+clip @ git+https://github.com/openai/CLIP.git@dcba3cb2e2827b402d2701e7e1c7d9fed8a20ef1
+cloudpathlib==0.19.0
+confection==0.1.5
+contourpy==1.3.0
+cycler==0.12.1
+cymem==2.0.8
+einops==0.8.0
+en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1-py3-none-any.whl#sha256=86cc141f63942d4b2c5fcee06630fd6f904788d2f0ab005cce45aadb8fb73889
+filelock==3.15.4
+fonttools==4.53.1
+fsspec==2024.9.0
+ftfy==6.2.3
+gdown==5.2.0
+idna==3.8
+importlib_resources==6.4.4
+Jinja2==3.1.4
+kiwisolver==1.4.7
+langcodes==3.4.0
+language_data==1.2.0
+marisa-trie==1.2.0
+markdown-it-py==3.0.0
+MarkupSafe==2.1.5
+matplotlib==3.9.2
+mdurl==0.1.2
+mpmath==1.3.0
+murmurhash==1.0.10
+networkx==3.2.1
+numpy==1.23.0
+nvidia-cublas-cu12==12.1.3.1
+nvidia-cuda-cupti-cu12==12.1.105
+nvidia-cuda-nvrtc-cu12==12.1.105
+nvidia-cuda-runtime-cu12==12.1.105
+nvidia-cudnn-cu12==9.1.0.70
+nvidia-cufft-cu12==11.0.2.54
+nvidia-curand-cu12==10.3.2.106
+nvidia-cusolver-cu12==11.4.5.107
+nvidia-cusparse-cu12==12.1.0.106
+nvidia-nccl-cu12==2.20.5
+nvidia-nvjitlink-cu12==12.6.68
+nvidia-nvtx-cu12==12.1.105
+packaging==24.1
+pillow==10.4.0
+preshed==3.0.9
+pydantic==2.8.2
+pydantic_core==2.20.1
+Pygments==2.18.0
+pyparsing==3.1.4
+PySocks==1.7.1
+python-dateutil==2.9.0.post0
+PyYAML==6.0.2
+regex==2024.7.24
+requests==2.32.3
+rich==13.8.0
+scipy==1.13.1
+shellingham==1.5.4
six==1.16.0
+smart-open==7.0.4
smplx==0.1.28
-spacy==3.3.1
-torch==1.13.1
-torchvision==0.14.1
-tqdm==4.66.1
-wandb==0.16.1
-
-# Also, must install Clip:
-### pip install git+https://github.com/openai/CLIP.git
+soupsieve==2.6
+spacy==3.7.6
+spacy-legacy==3.0.12
+spacy-loggers==1.0.5
+srsly==2.4.8
+sympy==1.13.2
+thinc==8.2.5
+torch==1.12.1+cu113
+torchaudio==0.12.1+cu113
+torchvision==0.13.1+cu113
+tqdm==4.66.5
+triton==3.0.0
+typer==0.12.5
+typing_extensions==4.12.2
+urllib3==2.2.2
+wasabi==1.1.3
+wcwidth==0.2.13
+weasel==0.4.1
+wrapt==1.16.0
+zipp==3.20.1
diff --git a/sample/conditional_synthesis.py b/sample/conditional_synthesis.py
index 68332dd..ef17457 100644
--- a/sample/conditional_synthesis.py
+++ b/sample/conditional_synthesis.py
@@ -56,14 +56,25 @@ def main():
args = cond_synt_args()
fixseed(args.seed)
- assert args.dataset == 'humanml' and args.abs_3d # Only humanml dataset and the absolute root representation is supported for conditional synthesis
+    assert args.dataset in ["humanml", "custom"] and args.abs_3d # Only the humanml and custom datasets with the absolute root representation are supported for conditional synthesis
assert args.keyframe_conditioned
out_path = args.output_dir
name = os.path.basename(os.path.dirname(args.model_path))
niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '')
- max_frames = 196 if args.dataset in ['kit', 'humanml'] else (200 if args.dataset == 'trajectories' else 60)
- fps = 12.5 if args.dataset == 'kit' else 20
+ fps = 20
+ if args.dataset == "kit":
+ max_frames = 196
+ fps = 12.5
+ elif args.dataset == "humanml":
+ max_frames = 196
+ elif args.dataset == "trajectories":
+ max_frames = 200
+ elif args.dataset == "custom": ## FIXME: need a better way to handle inference for custom
+ max_frames = 196
+ fps = 25
+ else:
+ max_frames = 60
dist_util.setup_dist(args.device)
if out_path == '':
checkpoint_name = os.path.split(os.path.dirname(args.model_path))[-1]
@@ -228,7 +239,11 @@ def main():
# Unnormalize samples and recover XYZ *positions*
if model.data_rep == 'hml_vec':
- n_joints = 22 if (sample.shape[1] in [263, 264]) else 21
+        n_joints = 21 # default: 21 joints (e.g. the KIT rig)
+        if sample.shape[1] in [263, 264]:
+            n_joints = 22
+        elif sample.shape[1] == 323: ## FIXME: hardcoded branch for the current custom rig
+            n_joints = 27
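+        # feature-dim -> joint-count map: 263/264 = 22-joint HumanML3D, 323 = 27-joint custom rig (12 * 27 - 1)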
sample = sample.cpu().permute(0, 2, 3, 1)
sample = data.dataset.t2m_dataset.inv_transform(sample).float()
sample = recover_from_ric(sample, n_joints, abs_3d=args.abs_3d)
diff --git a/train/train_condmdi.py b/train/train_condmdi.py
index 1e9398d..3eb6de3 100644
--- a/train/train_condmdi.py
+++ b/train/train_condmdi.py
@@ -37,7 +37,7 @@ def init_wandb(config, project_name=None, entity=None, tags=[], notes=None, **kw
def main():
- args = train_args(base_cls=card.motion_abs_unet_adagn_xl) # Choose the default full motion model from GMD
+ args = train_args(base_cls=card.motion_abs_unet_adagn_xl_custom_batch) # Choose the default full motion model from GMD
init_wandb(config=args)
args.save_dir = os.path.join("save", wandb.run.id)
pprint(args.__dict__)
diff --git a/train/training_loop.py b/train/training_loop.py
index b3aad55..cc09ef4 100644
--- a/train/training_loop.py
+++ b/train/training_loop.py
@@ -15,7 +15,7 @@
from diffusion.resample import LossAwareSampler, UniformSampler
from tqdm import tqdm
from diffusion.resample import create_named_schedule_sampler
-from data_loaders.humanml.networks.evaluator_wrapper import EvaluatorMDMWrapper
+# from data_loaders.humanml.networks.evaluator_wrapper import EvaluatorMDMWrapper
from eval import eval_humanml, eval_humanact12_uestc
from data_loaders.get_data import get_dataset_loader
from torch.cuda import amp
@@ -61,7 +61,10 @@ def __init__(self, args: TrainingOptions, model: nn.Module,
self.resume_step = 0
self.global_batch = self.batch_size # * dist.get_world_size()
self.num_steps = args.num_steps
- self.num_epochs = self.num_steps // len(self.data) + 1
+        try:
+            self.num_epochs = self.num_steps // len(self.data) + 1
+        except ZeroDivisionError:
+            print(f"Empty dataloader: {len(self.data)} batches for {self.num_steps} training steps")
+            raise  # without a valid num_epochs, run_loop below cannot proceed
self.sync_cuda = torch.cuda.is_available()
@@ -105,37 +108,8 @@ def __init__(self, args: TrainingOptions, model: nn.Module,
self.schedule_sampler_type, diffusion)
self.eval_wrapper, self.eval_data, self.eval_gt_data = None, None, None
if args.dataset in ['kit', 'humanml'] and args.eval_during_training:
- raise NotImplementedError()
- mm_num_samples = 0 # mm is super slow hence we won't run it during training
- mm_num_repeats = 0 # mm is super slow hence we won't run it during training
- gen_loader = get_dataset_loader(name=args.dataset,
- batch_size=args.eval_batch_size,
- num_frames=None,
- split=args.eval_split,
- hml_mode='eval')
-
- self.eval_gt_data = get_dataset_loader(
- name=args.dataset,
- batch_size=args.eval_batch_size,
- num_frames=None,
- split=args.eval_split,
- hml_mode='gt')
- self.eval_wrapper = EvaluatorMDMWrapper(args.dataset,
- dist_util.dev())
- self.eval_data = {
- 'test':
- lambda: eval_humanml.get_mdm_loader(
- model,
- diffusion,
- args.eval_batch_size,
- gen_loader,
- mm_num_samples,
- mm_num_repeats,
- gen_loader.dataset.opt.max_motion_length,
- args.eval_num_samples,
- scale=1.,
- )
- }
+ raise NotImplementedError() # check git history for previous eval_during_training code
+
self.use_ddp = False
self.ddp_model = self.model
@@ -198,7 +172,7 @@ def _load_optimizer_state(self):
def run_loop(self):
print('train steps:', self.num_steps)
for epoch in range(self.num_epochs):
- print(f'Starting epoch {epoch}')
+ print(f'Starting epoch {epoch} / {self.num_epochs}')
for motion, cond in tqdm(self.data):
if not (not self.lr_anneal_steps or
self.step + self.resume_step < self.lr_anneal_steps):
diff --git a/utils/editing_util.py b/utils/editing_util.py
index 525de8d..adc1c62 100644
--- a/utils/editing_util.py
+++ b/utils/editing_util.py
@@ -2,7 +2,7 @@
import os
import numpy as np
import random
-from data_loaders import humanml_utils, amass_utils
+from data_loaders import humanml_utils, amass_utils, custom_utils
def bool_matmul(a, b):
@@ -43,6 +43,22 @@ def joint_to_full_mask(joint_mask, mode='pos_rot_vel'):
mask = torch.stack(mask_comp, dim=0).any(dim=0) # [1, seqlen, bs, 263]
return mask.permute(2, 3, 0, 1) # [bs, 263, 1, seqlen]
+def joint_to_full_mask_custom(joint_mask, mode='pos_rot_vel'):
+ assert mode in ['pos', 'pos_rot', 'pos_rot_vel']
+ # joint_mask.shape = [bs, n_joints, 1, seqlen]
+ joint_mask = joint_mask.permute(2, 3, 0, 1) # [1, seqlen, bs, n_joints]
+
+ mask_comp = []
+ mask_comp.append(bool_matmul(joint_mask, torch.tensor(custom_utils.MAT_POS)))
+ mask_comp.append(bool_matmul(joint_mask, torch.tensor(custom_utils.MAT_CNT)))
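+    # position and contact channels are always included; rotation / velocity channels depend on 'mode'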
+ if mode in ['pos_rot', 'pos_rot_vel']:
+ mask_comp.append(bool_matmul(joint_mask, torch.tensor(custom_utils.MAT_ROT)))
+ if mode == 'pos_rot_vel':
+ mask_comp.append(bool_matmul(joint_mask, torch.tensor(custom_utils.MAT_VEL)))
+
+ mask = torch.stack(mask_comp, dim=0).any(dim=0) # [1, seqlen, bs, 12 * n_joints - 1]
+ return mask.permute(2, 3, 0, 1) # [bs, 12 * n_joints - 1, 1, seqlen]
+
def get_random_binary_mask(dim1, dim2, n):
valid_indices = torch.nonzero(torch.ones(dim1, dim2), as_tuple=False)
@@ -77,6 +93,8 @@ def get_keyframes_mask(data, lengths, edit_mode='benchmark_sparse', trans_length
elif n_joints == 764:
# AMASS dataset
joints_dim = 24
+ elif n_joints == 323: ## FIXME: another hidden joint check
+ joints_dim = 27
else:
raise ValueError('Unknown number of joints: {}'.format(n_joints))
@@ -217,6 +235,8 @@ def _sample_forced_mask(dim1, dim2):
obs_feature_mask = joint_to_full_mask(obs_joint_mask, mode=feature_mode)
elif joints_dim == 24:
obs_feature_mask = joint_to_full_mask_amass(obs_joint_mask, mode='all')
+ elif joints_dim == 27:
+ obs_feature_mask = joint_to_full_mask_custom(obs_joint_mask, mode=feature_mode)
else:
raise NotImplementedError(f"Unknown number of joints: {joints_dim}")
diff --git a/utils/model_util.py b/utils/model_util.py
index 9e7104d..8da820e 100644
--- a/utils/model_util.py
+++ b/utils/model_util.py
@@ -3,6 +3,7 @@
import torch
from torch import nn
from data_loaders.humanml.data.dataset import Text2MotionDatasetV2, HumanML3D, TextOnlyDataset
+from data_loaders.custom.data.dataset import CustomRig
from diffusion import gaussian_diffusion as gd
from diffusion.respace import DiffusionConfig, SpacedDiffusion, space_timesteps
@@ -13,7 +14,7 @@
from torch.utils.data import DataLoader
FullModelOptions = Union[DataOptions, ModelOptions, DiffusionOptions, TrainingOptions]
-Datasets = Union[Text2MotionDatasetV2, HumanML3D, TextOnlyDataset]
+Datasets = Union[Text2MotionDatasetV2, HumanML3D, CustomRig, TextOnlyDataset]
def load_model_wo_clip(model: nn.Module, state_dict):
@@ -43,12 +44,9 @@ def get_model_args(args: FullModelOptions, data: DataLoader):
action_emb = 'tensor'
if args.unconstrained:
cond_mode = 'no_cond'
- elif args.dataset == 'amass':
- cond_mode = 'no_cond'
- elif args.dataset in ['kit', 'humanml']:
- cond_mode = 'text'
else:
- cond_mode = 'action'
+ cond_mode = "action"
+
if hasattr(data.dataset, 'num_actions'):
num_actions = data.dataset.num_actions
else:
@@ -66,14 +64,22 @@ def get_model_args(args: FullModelOptions, data: DataLoader):
njoints = 67 # 4 + 21 * 3
else:
njoints = 263
+ cond_mode = "text"
elif args.dataset == 'kit':
data_rep = 'hml_vec'
njoints = 251
nfeats = 1
+ cond_mode = "text"
elif args.dataset == 'amass':
data_rep = 'hml_vec' # FIXME: find what is the correct data rep
njoints = 764
nfeats = 1
+ cond_mode = "no_cond"
+    elif args.dataset == "custom":  ## FIXME: figure out the proper values for the custom dataset
+ data_rep = "hml_vec"
+ njoints = 323 # FIXME: once custom is imported correctly, replace this with (n_joints * 12 - 1)
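+        # Assumption: 323 = 27 joints * 12 features - 1, mirroring the n_joints == 323 ->
+        # joints_dim = 27 mapping added in utils/editing_util.py.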
+ nfeats = 1
+ cond_mode = "text"
# Only produce trajectory (4 values: rot, x, z, y)
if args.traj_only:
diff --git a/utils/parser_util.py b/utils/parser_util.py
index eab51fe..5a3914c 100644
--- a/utils/parser_util.py
+++ b/utils/parser_util.py
@@ -106,7 +106,7 @@ class DataOptions:
metadata={
"help": "Dataset name (choose from list).",
"choices":
- ['humanml', 'kit', 'humanact12', 'uestc', 'amass']
+ ['humanml', 'kit', 'humanact12', 'uestc', 'amass', 'custom'] ## FIXME: custom is temporary
})
data_dir: str = field(
default="",
diff --git a/visualize/vis_utils.py b/visualize/vis_utils.py
index da616a2..bb5bf33 100644
--- a/visualize/vis_utils.py
+++ b/visualize/vis_utils.py
@@ -15,7 +15,7 @@ def __init__(self, npy_path, sample_idx, rep_idx, device=0, cuda=True):
self.motions = self.motions[None][0]
self.rot2xyz = Rotation2xyz(device='cpu')
self.faces = self.rot2xyz.smpl_model.faces
- self.bs, self.njoints, self.nfeats, self.nframes = self.motions['motion'].shape
+ self.bs, self.njoints, self.nfeats, self.nframes = self.motions['motion'][0].shape
self.opt_cache = {}
self.sample_idx = sample_idx
self.total_num_samples = self.motions['num_samples']
@@ -24,15 +24,21 @@ def __init__(self, npy_path, sample_idx, rep_idx, device=0, cuda=True):
self.num_frames = self.motions['motion'][self.absl_idx].shape[-1]
self.j2s = joints2smpl(num_frames=self.num_frames, device_id=device, cuda=cuda)
+        ## If the motion has 3 features per joint (xyz joint positions), run SMPLify, which replaces
+        ## the motion with a 6-feature (6D rotation) representation; see the NOTEs printed below.
if self.nfeats == 3:
- print(f'Running SMPLify For sample [{sample_idx}], repetition [{rep_idx}], it may take a few minutes.')
- motion_tensor, opt_dict = self.j2s.joint2smpl(self.motions['motion'][self.absl_idx].transpose(2, 0, 1)) # [nframes, njoints, 3]
- self.motions['motion'] = motion_tensor.cpu().numpy()
+ print(f'Running SMPLify for sample [{sample_idx}], repetition [{rep_idx}], it may take a few minutes.')
+            print("NOTE: SMPLify fits 24 SMPL joint rotations (in 6D) to the xyz joints, e.g. 22x3 -> 24x6")
+            print("NOTE: The root translation is then appended as an extra row, e.g. 24x6 + 1x6 -> 25x6")
+
+ motion_tensor, opt_dict = self.j2s.joint2smpl(self.motions['motion'][0][self.absl_idx].transpose(2, 0, 1)) # [nframes, njoints, 3]
+            self.motions['motion'] = motion_tensor.cpu().numpy()  # shape changes e.g. from (1, 22, 3, 196) to (1, 25, 6, 196); see the NOTEs above
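+            # Assumed layout of the 25 rows: rows 0-23 hold the SMPL joint rotations in 6D,
+            # row 24 carries the root translation in its first 3 channels (read back as
+            # self.root_loc further below).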
elif self.nfeats == 6:
self.motions['motion'] = self.motions['motion'][[self.absl_idx]]
self.bs, self.njoints, self.nfeats, self.nframes = self.motions['motion'].shape
- self.real_num_frames = self.motions['lengths'][self.absl_idx]
+ self.real_num_frames = self.motions['lengths'][0][self.absl_idx]
+        print("NOTE: Finally, rot2xyz converts the 6D motion into xyz mesh vertices below.")
self.vertices = self.rot2xyz(torch.tensor(self.motions['motion']), mask=None,
pose_rep='rot6d', translation=True, glob=True,
jointstype='vertices',
@@ -40,23 +46,16 @@ def __init__(self, npy_path, sample_idx, rep_idx, device=0, cuda=True):
vertstrans=True)
self.root_loc = self.motions['motion'][:, -1, :3, :].reshape(1, 1, 3, -1)
- # import pdb; pdb.set_trace()
- # self.vertices += self.root_loc
- # self.vertices[:, :, 1, :] += self.root_loc[:, :, 1, :]
-
def get_vertices(self, sample_i, frame_i):
return self.vertices[sample_i, :, :, frame_i].squeeze().tolist()
def get_trimesh(self, sample_i, frame_i):
return Trimesh(vertices=self.get_vertices(sample_i, frame_i),
faces=self.faces)
-
+
def get_traj_sphere(self, mesh):
- # import pdb; pdb.set_trace()
root_posi = np.copy(mesh.vertices).mean(0) # (6000, 3)
- # import pdb; pdb.set_trace()
- # root_posi[1] = mesh.vertices.min(0)[1] + 0.1
- root_posi[1] = self.vertices.numpy().min(axis=(0, 1, 3))[1] + 0.1
+        root_posi[1] = self.vertices.numpy().min(axis=(0, 1, 3))[1] + 0.1  # min over batch/vertex/frame axes leaves one value per coordinate; [1] is the lowest y (floor height), raised by 0.1
mesh = trimesh.primitives.Sphere(radius=0.05, center=root_posi, transform=None, subdivisions=1)
return mesh
@@ -70,7 +69,7 @@ def save_obj(self, save_path, frame_i):
with open(ground_save_path, 'w') as fw:
ground_sph_mesh.export(fw, 'obj')
return save_path
-
+
def save_npy(self, save_path):
data_dict = {
'motion': self.motions['motion'][0, :, :, :self.real_num_frames],