diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 2ea57e3e1c..9af9fb7a7a 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -18,7 +18,7 @@ Steps to reproduce the behavior:
4. See error
**Console logs / stack traces**
-Please wrap in [triple backticks (```)](https://help.github.com/en/articles/creating-and-highlighting-code-blocks) to make it easier to read.
+Please wrap in triple backticks (```) to make it easier to read.
**Screenshots**
If applicable, add screenshots to help explain your problem.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 6bf245b4cf..47c73ebf07 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -17,8 +17,8 @@ Describe the changes made in this PR.
### Checklist
- [ ] Added tests that prove my fix is effective or that my feature works
-- [ ] Updated the [changelog](https://github.com/Unity-Technologies/ml-agents/blob/main/com.unity.ml-agents/CHANGELOG.md) (if applicable)
-- [ ] Updated the [documentation](https://github.com/Unity-Technologies/ml-agents/tree/main/docs) (if applicable)
-- [ ] Updated the [migration guide](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Migrating.md) (if applicable)
+- [ ] Updated the changelog (if applicable)
+- [ ] Updated the documentation (if applicable)
+- [ ] Updated the migration guide (if applicable)
### Other comments
diff --git a/.github/stale.yml b/.github/stale.yml
index 2328fea17c..88e2766248 100644
--- a/.github/stale.yml
+++ b/.github/stale.yml
@@ -10,15 +10,15 @@ only: issues
# Issue specific configuration
issues:
limitPerRun: 5
- daysUntilStale: 28
- daysUntilClose: 14
+ daysUntilStale: 90
+ daysUntilClose: 30
markComment: >
This issue has been automatically marked as stale because it has not had activity in the
- last 28 days. It will be closed in the next 14 days if no further activity occurs.
+ last 90 days. It will be closed in the next 30 days if no further activity occurs.
Thank you for your contributions.
closeComment: >
This issue has been automatically closed because it has not had activity in the
- last 42 days. If this issue is still valid, please ping a maintainer.
+ last 120 days. If this issue is still valid, please ping a maintainer.
Thank you for your contributions.
exemptLabels:
- request
diff --git a/.github/workflows/colab.yml b/.github/workflows/colab.yml
index 8c2236cb2c..8e07f3108a 100644
--- a/.github/workflows/colab.yml
+++ b/.github/workflows/colab.yml
@@ -8,7 +8,10 @@ on:
- 'colab/**'
- '.github/workflows/colab.yml'
push:
- branches: [main]
+ branches:
+ - main
+ - develop
+ - 'release/**'
workflow_dispatch:
jobs:
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index 2fd09d2fe4..f44ec032e6 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -21,7 +21,7 @@ jobs:
node-version: '12'
- name: Install manual dependencies
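+      # pin markdown-link-check version to support multi-level reference links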
run: |
- sudo npm install -g markdown-link-check
+ sudo npm install -g markdown-link-check@3.8.7
python -m pip install pre-commit
pre-commit install
- name: Run markdown checker
@@ -43,13 +43,13 @@ jobs:
# If one test in the matrix fails we still want to run the others.
fail-fast: false
matrix:
- python-version: [3.7.x, 3.8.x, 3.9.x]
+ python-version: [3.8.x, 3.9.x, 3.10.x]
include:
- - python-version: 3.7.x
- pip_constraints: test_constraints_min_version.txt
- python-version: 3.8.x
- pip_constraints: test_constraints_mid_version.txt
+ pip_constraints: test_constraints_min_version.txt
- python-version: 3.9.x
+ pip_constraints: test_constraints_mid_version.txt
+ - python-version: 3.10.x
pip_constraints: test_constraints_max_version.txt
steps:
- uses: actions/checkout@v2
@@ -87,7 +87,7 @@ jobs:
run: |
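+          # -n 8 runs the suite on a fixed pool of 8 pytest-xdist workers instead of auto-detecting CPU count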
pytest --cov=ml-agents --cov=ml-agents-envs \
--cov-report=html --junitxml=junit/test-results-${{ matrix.python-version }}.xml \
- -p no:warnings -v -n auto
+ -p no:warnings -v -n 8
- name: Upload pytest test results
uses: actions/upload-artifact@v2
with:
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index e64e672991..53d05952ce 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -3,7 +3,10 @@ name: pre-commit
on:
pull_request:
push:
- branches: [main]
+ branches:
+ - main
+ - develop
+ - 'release/**'
workflow_dispatch:
jobs:
@@ -17,7 +20,7 @@ jobs:
submodules: recursive
- uses: actions/setup-python@v2
with:
- python-version: 3.7.x
+ python-version: 3.8.x
- uses: actions/setup-ruby@v1
env:
ImageOS: ubuntu20
@@ -48,8 +51,9 @@ jobs:
with:
node-version: '12'
- name: Install manual dependencies
+      # pin markdown-link-check version to support multi-level reference links
run: |
- sudo npm install -g markdown-link-check
+ sudo npm install -g markdown-link-check@3.8.7
python -m pip install pre-commit
pre-commit install
- name: Run markdown checker
diff --git a/.github/workflows/publish_docs.yaml b/.github/workflows/publish_docs.yaml
new file mode 100644
index 0000000000..0cec1bf586
--- /dev/null
+++ b/.github/workflows/publish_docs.yaml
@@ -0,0 +1,24 @@
+name: Publish HTML Docs
+
+on:
+ workflow_dispatch:
+
+jobs:
+ publish:
+ name: Publish Docs to GH Pages
+ runs-on: [self-hosted, Linux, X64]
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ with:
+ token: ${{ secrets.PUBLIC_GH_TOKEN }}
+ - name: Setup Python 3.8
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+ - name: Publish docs
+ run: |
+ pip install mkdocs
+ git remote add public git@github.com:Unity-Technologies/ml-agents.git
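+        # gh-deploy builds the MkDocs site and pushes it to the gh-pages branch of the "public" remote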
+        mkdocs gh-deploy --clean -r public
+
diff --git a/.github/workflows/publish_pypi.yaml b/.github/workflows/publish_pypi.yaml
index 294aed09cb..1ff4dd8fc9 100644
--- a/.github/workflows/publish_pypi.yaml
+++ b/.github/workflows/publish_pypi.yaml
@@ -20,10 +20,10 @@ jobs:
steps:
- uses: actions/checkout@main
- - name: Set up Python 3.7
+ - name: Set up Python 3.8
uses: actions/setup-python@v2
with:
- python-version: 3.7
+ python-version: 3.8
- name: Install dependencies
run: pip install setuptools wheel twine --user
- name: verify git tag vs. version
diff --git a/.github/workflows/publish_pypi_python_api.yaml b/.github/workflows/publish_pypi_python_api.yaml
index 465b05ac8d..c7adc7e45c 100644
--- a/.github/workflows/publish_pypi_python_api.yaml
+++ b/.github/workflows/publish_pypi_python_api.yaml
@@ -19,11 +19,11 @@ jobs:
package-path: [ml-agents-envs]
steps:
- - uses: actions/checkout@main
- - name: Set up Python 3.7
+ - uses: actions/checkout@v2
+ - name: Set up Python 3.8
uses: actions/setup-python@v2
with:
- python-version: 3.7
+ python-version: 3.8
- name: Install dependencies
run: pip install setuptools wheel twine --user
- name: verify git tag vs. version
@@ -39,6 +39,7 @@ jobs:
if: startsWith(github.ref, 'refs/tags') && contains(github.ref, 'test')
uses: pypa/gh-action-pypi-publish@717ba43cfbb0387f6ce311b169a825772f54d295
with:
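+      # "__token__" is the username PyPI expects when authenticating with an API token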
+ user: __token__
password: ${{ secrets.TEST_PYPI_PASSWORD }}
repository_url: https://test.pypi.org/legacy/
packages_dir: ${{ matrix.package-path }}/dist/
@@ -46,5 +47,6 @@ jobs:
if: startsWith(github.ref, 'refs/tags') && !contains(github.ref, 'test')
uses: pypa/gh-action-pypi-publish@717ba43cfbb0387f6ce311b169a825772f54d295
with:
+ user: __token__
password: ${{ secrets.PYPI_PASSWORD }}
packages_dir: ${{ matrix.package-path }}/dist/
diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml
index 23c8a77acc..9e50aaea5d 100644
--- a/.github/workflows/pytest.yml
+++ b/.github/workflows/pytest.yml
@@ -9,7 +9,10 @@ on:
- 'test_requirements.txt'
- '.github/workflows/pytest.yml'
push:
- branches: [main]
+ branches:
+ - main
+ - develop
+ - 'release/**'
workflow_dispatch:
inputs:
pytest_markers:
@@ -36,13 +39,13 @@ jobs:
# If one test in the matrix fails we still want to run the others.
fail-fast: false
matrix:
- python-version: [3.7.x, 3.8.x, 3.9.x]
+ python-version: [3.8.x, 3.9.x, 3.10.x]
include:
- - python-version: 3.7.x
- pip_constraints: test_constraints_min_version.txt
- python-version: 3.8.x
- pip_constraints: test_constraints_mid_version.txt
+ pip_constraints: test_constraints_min_version.txt
- python-version: 3.9.x
+ pip_constraints: test_constraints_mid_version.txt
+ - python-version: 3.10.x
pip_constraints: test_constraints_max_version.txt
steps:
- uses: actions/checkout@v2
@@ -88,7 +91,7 @@ jobs:
run: |
pytest --cov=ml-agents --cov=ml-agents-envs \
--cov-report=html --junitxml=junit/test-results-${{ matrix.python-version }}.xml \
- -p no:warnings -v -m "${{ steps.pytest_marker.outputs.markers }}" -n auto
+ -p no:warnings -v -m "${{ steps.pytest_marker.outputs.markers }}" -n 8
- name: Upload pytest test results
uses: actions/upload-artifact@v2
with:
diff --git a/.gitmodules b/.gitmodules
index cd43b6df8e..e69de29bb2 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +0,0 @@
-[submodule "com.unity.ml-agents"]
- path = com.unity.ml-agents
- url = ../com.unity.ml-agents.git
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ba9a84327d..8d15b038b8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/python/black
- rev: 22.1.0
+ rev: 22.3.0
hooks:
- id: black
exclude: >
@@ -25,7 +25,7 @@ repos:
exclude: ".*_pb2.py"
args: [--ignore-missing-imports, --disallow-incomplete-defs, --no-strict-optional]
additional_dependencies: [types-PyYAML, types-attrs, types-protobuf, types-setuptools, types-filelock]
-- repo: https://gitlab.com/pycqa/flake8
+- repo: https://github.com/PyCQA/flake8
rev: 3.9.2
hooks:
- id: flake8
@@ -128,6 +128,6 @@ repos:
- id: generate-markdown-docs
name: generate markdown docs
language: python
- entry: ./utils/generate_markdown_docs.py --package_dirs ml-agents-envs
+ entry: ./utils/generate_markdown_docs.py --package_dirs ml-agents-envs ml-agents
pass_filenames: false
additional_dependencies: [pyyaml, pydoc-markdown==3.10.1]
diff --git a/.yamato/com.unity.ml-agents-coverage.yml b/.yamato/com.unity.ml-agents-coverage.yml
index 09ef550417..f46612541b 100644
--- a/.yamato/com.unity.ml-agents-coverage.yml
+++ b/.yamato/com.unity.ml-agents-coverage.yml
@@ -12,11 +12,10 @@ test_coverage_{{ package.name }}_{{ platform.name }}_{{ editor.version }}:
image: {{ platform.image }}
flavor: {{ platform.flavor}}
commands:
- - git submodule update --init --recursive
- - npm install upm-ci-utils@stable -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
+ - npm install upm-ci-utils@1.27.0 -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
- upm-ci project test -u {{ editor.version }} --type project-tests --project-path {{ editor.testProject }} --package-filter {{ package.name }} {{ coverageOptions }} --extra-utr-arg "reruncount=2"
- |
- conda activate python3.7
+ conda activate python3.8
python3 ml-agents/tests/yamato/check_coverage_percent.py upm-ci~/test-results/ {{ package.minCoveragePct }}
artifacts:
logs:
@@ -29,6 +28,7 @@ test_coverage_{{ package.name }}_{{ platform.name }}_{{ editor.version }}:
{% if platform.name == "linux" %}
expression: |
(pull_request.target eq "main" OR
+ pull_request.target eq "develop" OR
pull_request.target match "release.+") AND
NOT pull_request.draft AND
(pull_request.changes.any match "com.unity.ml-agents/**" OR
diff --git a/.yamato/com.unity.ml-agents-optional-dep-tests.yml b/.yamato/com.unity.ml-agents-optional-dep-tests.yml
deleted file mode 100644
index c70b4cf250..0000000000
--- a/.yamato/com.unity.ml-agents-optional-dep-tests.yml
+++ /dev/null
@@ -1,66 +0,0 @@
-optional_deps:
- - name: Analytics
- project: "OptionalDepedencyTests/NoAnalyticsModule"
- version: 2020.3
- - name: Physics
- project: OptionalDepedencyTests/NoPhysicsModule
- version: 2020.3
- - name: Physics2D
- project: OptionalDepedencyTests/NoPhysics2DModule
- version: 2020.3
----
-
- {% for optional_dep in optional_deps %}
-OptionalDependencyTests_{{ optional_dep.name }}:
- name : Test Optional Package Dependencies {{ optional_dep.name }}
- agent:
- type: Unity::VM
- image: ml-agents/ml-agents-ubuntu-18.04:latest
- flavor: b1.medium
- commands:
- - git submodule update --init --recursive
- - |
- curl -L https://artifactory.prd.it.unity3d.com/artifactory/api/gpg/key/public | sudo apt-key add -
- sudo sh -c "echo 'deb https://artifactory.prd.it.unity3d.com/artifactory/unity-apt-local bionic main' > /etc/apt/sources.list.d/unity.list"
- sudo apt update
- sudo apt install -y unity-config
- npm install upm-ci-utils@stable -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
- unity-config settings editor-path ./.Editor
- unity-config project create opt-deps-test
- unity-config project add dependency com.unity.ml-agents/
- unity-config project add testable com.unity.ml-agents
- unity-config project add dependency com.unity.modules.imageconversion@1.0.0
- unity-config project add dependency com.unity.modules.jsonserialize@1.0.0
- {% unless optional_dep.name == "Physics" %}
- unity-config project add dependency com.unity.modules.physics@1.0.0
- {% endunless %}
- {% unless optional_dep.name == "Physics2D" %}
- unity-config project add dependency com.unity.modules.physics2d@1.0.0
- {% endunless %}
- {% unless optional_dep.name == "Analytics" %}
- unity-config project add dependency com.unity.modules.unityanalytics@1.0.0
- {% endunless %}
- upm-ci project test -u {{ optional_dep.version }} --type project-tests --project-path opt-deps-test --package-filter com.unity.ml-agents
- artifacts:
- logs:
- paths:
- - "upm-ci~/test-results/**/*"
- dependencies:
- - .yamato/com.unity.ml-agents-pack.yml#pack
- {% for coverage_editor in coverage_test_editors %}
- {% for coverage_platform in coverage_test_platforms %}
- {% for coverage_package in coverage_test_packages %}
- - .yamato/com.unity.ml-agents-coverage.yml#test_coverage_{{ coverage_package.name }}_{{ coverage_platform.name }}_{{ coverage_editor.version }}
- {% endfor %}
- {% endfor %}
- {% endfor %}
- triggers:
- cancel_old_ci: true
- expression: |
- (pull_request.target eq "main" OR
- pull_request.target match "release.+") AND
- NOT pull_request.draft AND
- (pull_request.changes.any match "com.unity.ml-agents/**" OR
- pull_request.changes.any match ".yamato/com.unity.ml-agents-optional-dep-tests.yml")
- {% endfor %}
-
diff --git a/.yamato/com.unity.ml-agents-pack.yml b/.yamato/com.unity.ml-agents-pack.yml
index b72eb073ac..71e6d5d9af 100644
--- a/.yamato/com.unity.ml-agents-pack.yml
+++ b/.yamato/com.unity.ml-agents-pack.yml
@@ -5,12 +5,11 @@ pack:
image: ml-agents/ml-agents-ubuntu-18.04:latest
flavor: b1.small
commands:
- - git submodule update --init --recursive
- |
eval "$($HOME/anaconda/bin/conda shell.bash hook)"
- conda activate python3.7
+ conda activate python3.8
python3 -m pip install unity-downloader-cli --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple --upgrade
- unity-downloader-cli -u 2020.3 -c editor --wait --fast
+ unity-downloader-cli -u 2021.3 -c editor --wait --fast
./.Editor/Unity -projectPath Project -batchMode -executeMethod Unity.MLAgents.SampleExporter.ExportCuratedSamples -logFile -
npm install upm-ci-utils@stable -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
upm-ci project pack --project-path Project
diff --git a/.yamato/com.unity.ml-agents-performance.yml b/.yamato/com.unity.ml-agents-performance.yml
index 211381a818..c9f16cf2a3 100644
--- a/.yamato/com.unity.ml-agents-performance.yml
+++ b/.yamato/com.unity.ml-agents-performance.yml
@@ -1,6 +1,6 @@
test_editors:
- - version: 2020.3
- - version: 2021.2
+ - version: 2021.3
+ - version: 2022.1
---
{% for editor in test_editors %}
Run_Mac_Perfomance_Tests{{ editor.version }}:
@@ -12,7 +12,6 @@ Run_Mac_Perfomance_Tests{{ editor.version }}:
variables:
UNITY_VERSION: {{ editor.version }}
commands:
- - git submodule update --init --recursive
- python3 -m pip install unity-downloader-cli --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple --upgrade
- unity-downloader-cli -u {{ editor.version }} -c editor --wait --fast
- curl -s https://artifactory.prd.it.unity3d.com/artifactory/unity-tools-local/utr-standalone/utr --output utr
@@ -21,7 +20,7 @@ Run_Mac_Perfomance_Tests{{ editor.version }}:
triggers:
cancel_old_ci: true
recurring:
- - branch: main
+ - branch: develop
frequency: daily
artifacts:
logs:
diff --git a/.yamato/com.unity.ml-agents-promotion.yml b/.yamato/com.unity.ml-agents-promotion.yml
index 1a36644f0c..e4930acfea 100644
--- a/.yamato/com.unity.ml-agents-promotion.yml
+++ b/.yamato/com.unity.ml-agents-promotion.yml
@@ -1,5 +1,5 @@
test_editors:
- - version: 2019.3
+ - version: 2021.3
test_platforms:
- name: win
type: Unity::VM
@@ -18,7 +18,7 @@ promotion_test_{{ platform.name }}_{{ editor.version }}:
variables:
UPMCI_PROMOTION: 1
commands:
- - npm install upm-ci-utils@stable -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
+ - npm install upm-ci-utils@1.27.0 -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
- upm-ci package test --unity-version {{ editor.version }} --package-path com.unity.ml-agents
artifacts:
logs:
@@ -48,7 +48,7 @@ promote:
variables:
UPMCI_PROMOTION: 1
commands:
- - npm install upm-ci-utils@stable -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
+ - npm install upm-ci-utils@1.27.0 -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
- upm-ci package promote --package-path com.unity.ml-agents
# triggers:
# tags:
diff --git a/.yamato/com.unity.ml-agents-publish.yml b/.yamato/com.unity.ml-agents-publish.yml
index a87e83c35e..3f28322007 100644
--- a/.yamato/com.unity.ml-agents-publish.yml
+++ b/.yamato/com.unity.ml-agents-publish.yml
@@ -7,7 +7,7 @@ publish:
variables:
UPMCI_ENABLE_PACKAGE_SIGNING: 1
commands:
- - npm install upm-ci-utils@stable -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
+ - npm install upm-ci-utils@1.27.0 -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
- upm-ci package publish --package-path com.unity.ml-agents
triggers:
cancel_old_ci: true
diff --git a/.yamato/com.unity.ml-agents-test.yml b/.yamato/com.unity.ml-agents-test.yml
index b77dcfc3c2..54e334cc8d 100644
--- a/.yamato/com.unity.ml-agents-test.yml
+++ b/.yamato/com.unity.ml-agents-test.yml
@@ -1,10 +1,10 @@
{% metadata_file .yamato/coverage_tests.metafile %}
test_editors:
- - version: 2020.3
+ - version: 2021.3
# We want some scene tests to run in the DevProject, but packages there only support 2020+
testProject: Project
enableNoDefaultPackages: !!bool true
- - version: 2021.2
+ - version: 2022.1
testProject: DevProject
enableNoDefaultPackages: !!bool true
@@ -59,7 +59,7 @@ all_package_tests:
triggers:
cancel_old_ci: true
recurring:
- - branch: main
+ - branch: develop
frequency: daily
{% for package in packages %}
@@ -79,8 +79,7 @@ test_{{ package.name }}_{{ platform.name }}_{{ editor.version }}:
image: {{ platform.image }}
flavor: {{ platform.flavor}}
commands:
- - git submodule update --init --recursive
- - npm install upm-ci-utils@stable -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
+ - npm install upm-ci-utils@1.27.0 -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
- upm-ci package test -u {{ editor.version }} --package-path {{ package.name }} {{ noDefaultPackagesOptions }} --warnings-as-errors --extra-utr-arg "reruncount=2"
artifacts:
logs:
@@ -100,6 +99,7 @@ test_{{ package.name }}_{{ platform.name }}_{{ editor.version }}:
{% if platform.name == "linux" %}
expression: |
(pull_request.target eq "main" OR
+ pull_request.target eq "develop" OR
pull_request.target match "release.+") AND
NOT pull_request.draft AND
(pull_request.changes.any match "com.unity.ml-agents/**" OR
@@ -124,14 +124,13 @@ test_{{ package.name }}_{{ platform.name }}_trunk:
image: {{ platform.image }}
flavor: {{ platform.flavor}}
commands:
- - git submodule update --init --recursive
- |
{% if platform.name == "linux" %}
- conda activate python3.7
+ conda activate python3.8
{% endif %}
python -m pip install unity-downloader-cli --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple --upgrade
unity-downloader-cli -u trunk -c editor --wait --fast
- npm install upm-ci-utils@stable -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
+ npm install upm-ci-utils@1.27.0 -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
upm-ci project test -u {{ editor.version }} --project-path {{ editor.testProject }} --package-filter {{ package.name }} --extra-create-project-arg="-upmNoDefaultPackages" --extra-utr-arg "reruncount=2"
artifacts:
logs:
diff --git a/.yamato/compressed-sensor-test.yml b/.yamato/compressed-sensor-test.yml
index a0745255c2..23e28a5533 100644
--- a/.yamato/compressed-sensor-test.yml
+++ b/.yamato/compressed-sensor-test.yml
@@ -10,10 +10,9 @@ test_compressed_obs_{{ editor.version }}:
variables:
UNITY_VERSION: {{ editor.version }}
commands:
- - git submodule update --init --recursive
- |
eval "$($HOME/anaconda/bin/conda shell.bash hook)"
- conda activate python3.7
+ conda activate python3.8
python -m pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
python -u -m ml-agents.tests.yamato.setup_venv
python ml-agents/tests/yamato/scripts/run_compressed_sensor.py --env=artifacts/testPlayer-TestGridCompressed
@@ -25,6 +24,7 @@ test_compressed_obs_{{ editor.version }}:
{% if editor.extra_test == "sensor" %}
expression: |
(pull_request.target eq "main" OR
+ pull_request.target eq "develop" OR
pull_request.target match "release.+") AND
NOT pull_request.draft AND
(pull_request.changes.any match "com.unity.ml-agents/**" OR
diff --git a/.yamato/coverage_tests.metafile b/.yamato/coverage_tests.metafile
index a12e8cd2ad..7f5aaad096 100644
--- a/.yamato/coverage_tests.metafile
+++ b/.yamato/coverage_tests.metafile
@@ -1,5 +1,5 @@
coverage_test_editors:
- - version: 2020.3
+ - version: 2021.3
testProject: DevProject
coverage_test_platforms:
diff --git a/.yamato/gym-interface-test.yml b/.yamato/gym-interface-test.yml
index f71f67d3b0..a90a8ce9ec 100644
--- a/.yamato/gym-interface-test.yml
+++ b/.yamato/gym-interface-test.yml
@@ -10,10 +10,9 @@ test_gym_interface_{{ editor.version }}:
variables:
UNITY_VERSION: {{ editor.version }}
commands:
- - git submodule update --init --recursive
- |
eval "$($HOME/anaconda/bin/conda shell.bash hook)"
- conda activate python3.7
+ conda activate python3.8
python -m pip install wheel --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
python -m pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
python -u -m ml-agents.tests.yamato.setup_venv
@@ -25,6 +24,7 @@ test_gym_interface_{{ editor.version }}:
{% if editor.extra_test == "gym" %}
expression: |
(pull_request.target eq "main" OR
+ pull_request.target eq "develop" OR
pull_request.target match "release.+") AND
NOT pull_request.draft AND
(pull_request.changes.any match "com.unity.ml-agents/**" OR
diff --git a/.yamato/protobuf-generation-test.yml b/.yamato/protobuf-generation-test.yml
index 2bc7f62b65..be490867e9 100644
--- a/.yamato/protobuf-generation-test.yml
+++ b/.yamato/protobuf-generation-test.yml
@@ -6,13 +6,12 @@ test_linux_protobuf_generation:
flavor: b1.large
variables:
GRPC_VERSION: "1.14.1"
- CS_PROTO_PATH: "Runtime/Grpc/CommunicatorObjects"
+ CS_PROTO_PATH: "com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects"
commands:
- - git submodule update --init --recursive
- |
sudo apt-get update && sudo apt-get install -y nuget
eval "$($HOME/anaconda/bin/conda shell.bash hook)"
- conda activate python3.7
+ conda activate python3.8
nuget install Grpc.Tools -Version $GRPC_VERSION -OutputDirectory protobuf-definitions/
python3 -m pip install --upgrade pip --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
python3 -m pip install grpcio==1.28.1 grpcio-tools==1.13.0 protobuf==3.11.3 six==1.14.0 mypy-protobuf==1.16.0 --progress-bar=off --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
@@ -23,7 +22,6 @@ test_linux_protobuf_generation:
popd
mkdir -p artifacts
touch artifacts/proto.patch
- cd com.unity.ml-agents
git diff --exit-code -- :/ ":(exclude,top)$CS_PROTO_PATH/*.meta" \
|| { GIT_ERR=$?; echo "protobufs need to be regenerated, apply the patch uploaded to artifacts."; \
echo "Apply the patch with the command 'git apply proto.patch'"; \
@@ -32,6 +30,7 @@ test_linux_protobuf_generation:
cancel_old_ci: true
expression: |
(pull_request.target eq "main" OR
+ pull_request.target eq "develop" OR
pull_request.target match "release.+") AND
NOT pull_request.draft AND
(pull_request.changes.any match "protobuf-definitions/**" OR
diff --git a/.yamato/pytest-gpu.yml b/.yamato/pytest-gpu.yml
index b1824b8b10..99999ca224 100644
--- a/.yamato/pytest-gpu.yml
+++ b/.yamato/pytest-gpu.yml
@@ -5,19 +5,24 @@ pytest_gpu:
image: ml-agents/ml-agents-ubuntu-18.04:latest
flavor: b1.large
commands:
- - git submodule update --init --recursive
- |
eval "$($HOME/anaconda/bin/conda shell.bash hook)"
- conda activate python3.7
+ conda activate python3.8
python3 -m pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
python3 -u -m ml-agents.tests.yamato.setup_venv
python3 -m pip install --progress-bar=off -r test_requirements.txt --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
python3 -m pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
- python3 -m pytest -m "not slow" -n auto --junitxml=junit/test-results.xml -p no:warnings
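+      # fail the job early if torch cannot detect a CUDA device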
+ if python -c "exec('import torch \nif not torch.cuda.is_available(): raise')" &> /dev/null; then
+ echo 'all good'
+ else
+ exit 1
+ fi
+ python3 -m pytest -m "not slow" --junitxml=junit/test-results.xml -p no:warnings
triggers:
cancel_old_ci: true
expression: |
(push.branch eq "main" OR
+ push.branch eq "develop" OR
push.branch match "release.+") AND
push.changes.any match "ml-agents/**" AND
NOT push.changes.all match "**/*.md"
diff --git a/.yamato/python-ll-api-test.yml b/.yamato/python-ll-api-test.yml
index 51b8e269f2..cde94d31fb 100644
--- a/.yamato/python-ll-api-test.yml
+++ b/.yamato/python-ll-api-test.yml
@@ -10,10 +10,9 @@ test_linux_ll_api_{{ editor.version }}:
variables:
UNITY_VERSION: {{ editor.version }}
commands:
- - git submodule update --init --recursive
- |
eval "$($HOME/anaconda/bin/conda shell.bash hook)"
- conda activate python3.7
+ conda activate python3.8
python -m pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
python -u -m ml-agents.tests.yamato.setup_venv
python ml-agents/tests/yamato/scripts/run_llapi.py
@@ -27,6 +26,7 @@ test_linux_ll_api_{{ editor.version }}:
{% if editor.extra_test == "llapi" %}
expression: |
(pull_request.target eq "main" OR
+ pull_request.target eq "develop" OR
pull_request.target match "release.+") AND
NOT pull_request.draft AND
(pull_request.changes.any match "com.unity.ml-agents/**" OR
diff --git a/.yamato/sonar-python-package.yml b/.yamato/sonar-python-package.yml
new file mode 100644
index 0000000000..3e9403bc30
--- /dev/null
+++ b/.yamato/sonar-python-package.yml
@@ -0,0 +1,23 @@
+csharp:
+ name: Sonarqube Scan for ml-agents python repo
+ agent:
+ type: Unity::metal::macmini
+ image: package-ci/mac:v1.8.1-822785
+ flavor: m1.mac
+ variables:
+ SONARQUBE_PROJECT_KEY: ai-ml-agents-toolkit
+ SONARQUBE_URL: https://sonarqube.internal.unity3d.com
+ SONARQUBE_LOGIN: a08467db099d82931708d480b8dbf428cf1921d5
+ TARGET_BRANCH: develop
+ commands:
+ - npm install shellcheck --save-dev
+ - npm install upm-ci-utils@1.27.0 -g --registry https://artifactory.prd.it.unity3d.com/artifactory/api/npm/upm-npm
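+  # download the standalone sonar-scanner CLI and scan the python source directories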
+ - curl https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-4.7.0.2747-macosx.zip -o sonar-scanner-cli-macosx.zip -L
+ - unzip sonar-scanner-cli-macosx.zip -d ~/sonar-scanner-cli
+  - ~/sonar-scanner-cli/sonar-scanner-4.7.0.2747-macosx/bin/sonar-scanner -Dsonar.projectKey=$SONARQUBE_PROJECT_KEY -Dsonar.sources=ml-agents-envs,ml-agents,ml-agents-plugin-examples,ml-agents-trainer-plugin,utils -Dsonar.host.url=$SONARQUBE_URL -Dsonar.login=$SONARQUBE_LOGIN -Dsonar.branch.name=$TARGET_BRANCH -Dsonar.scm.provider=git
+ triggers:
+ cancel_old_ci: true
+ expression: |
+ ((pull_request.target eq "main" OR pull_request.target eq "develop" OR pull_request.target match "release.+")
+    AND NOT pull_request.changes.all match "**/*.md") OR
+ (push.branch eq "main" OR push.branch eq "develop")
diff --git a/.yamato/standalone-build-test.yml b/.yamato/standalone-build-test.yml
index c709d2a7b5..fac93943e9 100644
--- a/.yamato/standalone-build-test.yml
+++ b/.yamato/standalone-build-test.yml
@@ -10,10 +10,9 @@ test_linux_standalone_{{ editor.version }}:
variables:
UNITY_VERSION: {{ editor.version }}
commands:
- - git submodule update --init --recursive
- |
eval "$($HOME/anaconda/bin/conda shell.bash hook)"
- conda activate python3.7
+ conda activate python3.8
python3 -m pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
python3 -m pip install unity-downloader-cli --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple --upgrade
unity-downloader-cli -u {{ editor.version }} -c editor --wait --fast
@@ -27,6 +26,7 @@ test_linux_standalone_{{ editor.version }}:
cancel_old_ci: true
expression: |
(pull_request.target eq "main" OR
+ pull_request.target eq "develop" OR
pull_request.target match "release.+") AND
NOT pull_request.draft AND
(pull_request.changes.any match "com.unity.ml-agents/**" OR
diff --git a/.yamato/standalone-build-webgl-test.yml b/.yamato/standalone-build-webgl-test.yml
index 395d45f3f2..8d7a699768 100644
--- a/.yamato/standalone-build-webgl-test.yml
+++ b/.yamato/standalone-build-webgl-test.yml
@@ -1,4 +1,4 @@
-{% capture editor_version %}2020.3{% endcapture %}
+{% capture editor_version %}2021.3{% endcapture %}
test_webgl_standalone_{{ editor_version }}:
name: Test WebGL Standalone {{ editor_version }}
agent:
@@ -8,10 +8,9 @@ test_webgl_standalone_{{ editor_version }}:
variables:
UNITY_VERSION: {{ editor_version }}
commands:
- - git submodule update --init --recursive
- |
eval "$($HOME/anaconda/bin/conda shell.bash hook)"
- conda activate python3.7
+ conda activate python3.8
python -m pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
python -m pip install unity-downloader-cli --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple --upgrade
unity-downloader-cli -u {{ editor_version }} -c editor -c WebGL --wait --fast
@@ -19,7 +18,7 @@ test_webgl_standalone_{{ editor_version }}:
triggers:
cancel_old_ci: true
recurring:
- - branch: main
+ - branch: develop
frequency: weekly
artifacts:
logs:
diff --git a/.yamato/test_versions.metafile b/.yamato/test_versions.metafile
index 7cfdce7c03..8fdd62bb10 100644
--- a/.yamato/test_versions.metafile
+++ b/.yamato/test_versions.metafile
@@ -3,9 +3,9 @@
# For each "other" test, we only run it against a single version of the
# editor to reduce the number of yamato jobs
test_editors:
- - version: 2020.3
+ - version: 2021.3
extra_test: gym
- - version: 2021.2
+ - version: 2022.1
extra_test: sensor
- version: trunk
extra_test: llapi
diff --git a/.yamato/training-backcompat-tests.yml b/.yamato/training-backcompat-tests.yml
deleted file mode 100644
index f179c4ca5a..0000000000
--- a/.yamato/training-backcompat-tests.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-
-test_mac_backcompat_2019.4:
- {% capture editor_version %}2019.4{% endcapture %}
- {% capture csharp_backcompat_version %}1.0.0{% endcapture %}
- # This test has to run on mac because it requires the custom build of tensorflow without AVX
- # Test against 2020.1 because 2020.2 has to run against package version 1.2.0
- name: Test Mac Backcompat Training {{ editor_version }}
- agent:
- type: Unity::VM::osx
- image: ml-agents/ml-agents-bokken-mac:0.1.5-853758
- flavor: b1.small
- variables:
- UNITY_VERSION: {{ editor_version }}
- commands:
- - git submodule update --init --recursive
- - |
- python -m venv venv && source venv/bin/activate
- python -m pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
- python -m pip install unity-downloader-cli --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple --upgrade
- unity-downloader-cli -u {{ editor_version }} -c editor --wait --fast
- # Backwards-compatibility tests.
- # If we make a breaking change to the communication protocol, these will need
- # to be disabled until the next release.
- python -u -m ml-agents.tests.yamato.standalone_build_tests --build-target=mac
- python -u -m ml-agents.tests.yamato.training_int_tests --csharp {{ csharp_backcompat_version }}
- - |
- python -m venv venv_old && source venv_old/bin/activate
- python -m pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
- python -u -m ml-agents.tests.yamato.training_int_tests --python 0.24.0
- triggers:
- cancel_old_ci: true
- recurring:
- - branch: main
- frequency: daily
- artifacts:
- logs:
- paths:
- - "artifacts/standalone_build.txt"
- - "artifacts/inference.nn.txt"
- - "artifacts/inference.onnx.txt"
- - "artifacts/*.log"
- standalonebuild:
- paths:
- - "artifacts/testPlayer*/**"
- - "artifacts/models/**"
diff --git a/.yamato/training-int-tests.yml b/.yamato/training-int-tests.yml
index 860405e176..18f5f4f83d 100644
--- a/.yamato/training-int-tests.yml
+++ b/.yamato/training-int-tests.yml
@@ -10,10 +10,9 @@ test_linux_training_int_{{ editor.version }}:
variables:
UNITY_VERSION: {{ editor.version }}
commands:
- - git submodule update --init --recursive
- |
eval "$($HOME/anaconda/bin/conda shell.bash hook)"
- conda activate python3.7
+ conda activate python3.8
python -m pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
python -u -m ml-agents.tests.yamato.training_int_tests
dependencies:
@@ -22,6 +21,7 @@ test_linux_training_int_{{ editor.version }}:
cancel_old_ci: true
expression: |
(pull_request.target eq "main" OR
+ pull_request.target eq "develop" OR
pull_request.target match "release.+") AND
NOT pull_request.draft AND
(pull_request.changes.any match "com.unity.ml-agents/**" OR
diff --git a/DevProject/Packages/packages-lock.json b/DevProject/Packages/packages-lock.json
index 7eac0c8e6b..56804861c9 100644
--- a/DevProject/Packages/packages-lock.json
+++ b/DevProject/Packages/packages-lock.json
@@ -1,7 +1,7 @@
{
"dependencies": {
"com.unity.barracuda": {
- "version": "2.3.1-preview",
+ "version": "3.0.0",
"depth": 1,
"source": "registry",
"dependencies": {
@@ -12,7 +12,7 @@
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.burst": {
- "version": "1.6.0",
+ "version": "1.6.6",
"depth": 2,
"source": "registry",
"dependencies": {
@@ -46,7 +46,7 @@
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.mathematics": {
- "version": "1.2.1",
+ "version": "1.2.6",
"depth": 3,
"source": "registry",
"dependencies": {},
@@ -57,7 +57,7 @@
"depth": 0,
"source": "local",
"dependencies": {
- "com.unity.barracuda": "2.3.1-preview",
+ "com.unity.barracuda": "3.0.0",
"com.unity.modules.imageconversion": "1.0.0",
"com.unity.modules.jsonserialize": "1.0.0"
}
@@ -72,16 +72,14 @@
}
},
"com.unity.nuget.mono-cecil": {
- "version": "0.1.6-preview.2",
+ "version": "1.10.1",
"depth": 1,
"source": "registry",
- "dependencies": {
- "nuget.mono-cecil": "0.1.6-preview"
- },
+ "dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.nuget.newtonsoft-json": {
- "version": "2.0.0-preview",
+ "version": "3.0.2",
"depth": 1,
"source": "registry",
"dependencies": {},
@@ -108,7 +106,7 @@
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.settings-manager": {
- "version": "1.0.1",
+ "version": "1.0.3",
"depth": 1,
"source": "registry",
"dependencies": {},
@@ -159,13 +157,6 @@
"dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
- "nuget.mono-cecil": {
- "version": "0.1.6-preview",
- "depth": 2,
- "source": "registry",
- "dependencies": {},
- "url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
- },
"nuget.moq": {
"version": "1.0.0",
"depth": 1,
diff --git a/DevProject/ProjectSettings/EditorBuildSettings.asset b/DevProject/ProjectSettings/EditorBuildSettings.asset
index 1823a7a08e..7512fc3e3a 100644
--- a/DevProject/ProjectSettings/EditorBuildSettings.asset
+++ b/DevProject/ProjectSettings/EditorBuildSettings.asset
@@ -9,5 +9,5 @@ EditorBuildSettings:
path: Assets/ML-Agents/Scripts/Tests/Runtime/AcademyTest/AcademyStepperTestScene.unity
guid: 9bafc50b1e55b43b2b1ae9620f1f8311
m_configObjects:
- com.unity.ml-agents.settings: {fileID: 11400000, guid: 7017f4eb06bef4889a3608a54b1cc59e,
+ com.unity.ml-agents.settings: {fileID: 11400000, guid: 905d6ca857fdf4d028b93658cf00e271,
type: 2}
diff --git a/DevProject/ProjectSettings/ProjectVersion.txt b/DevProject/ProjectSettings/ProjectVersion.txt
index 4c9401b919..8ea1b855ae 100644
--- a/DevProject/ProjectSettings/ProjectVersion.txt
+++ b/DevProject/ProjectSettings/ProjectVersion.txt
@@ -1,2 +1,2 @@
-m_EditorVersion: 2020.3.25f1
-m_EditorVersionWithRevision: 2020.3.25f1 (9b9180224418)
+m_EditorVersion: 2021.3.11f1
+m_EditorVersionWithRevision: 2021.3.11f1 (0a5ca18544bf)
diff --git a/Project/Assets/ML-Agents/Examples/3DBall/Scenes/Visual3DBall.unity b/Project/Assets/ML-Agents/Examples/3DBall/Scenes/Visual3DBall.unity
index eee59cc5d6..0a57e315a0 100644
--- a/Project/Assets/ML-Agents/Examples/3DBall/Scenes/Visual3DBall.unity
+++ b/Project/Assets/ML-Agents/Examples/3DBall/Scenes/Visual3DBall.unity
@@ -43,7 +43,7 @@ RenderSettings:
--- !u!157 &3
LightmapSettings:
m_ObjectHideFlags: 0
- serializedVersion: 11
+ serializedVersion: 12
m_GIWorkflowMode: 0
m_GISettings:
serializedVersion: 2
@@ -54,7 +54,7 @@ LightmapSettings:
m_EnableBakedLightmaps: 1
m_EnableRealtimeLightmaps: 1
m_LightmapEditorSettings:
- serializedVersion: 10
+ serializedVersion: 12
m_Resolution: 2
m_BakeResolution: 40
m_AtlasSize: 1024
@@ -62,6 +62,7 @@ LightmapSettings:
m_AOMaxDistance: 1
m_CompAOExponent: 1
m_CompAOExponentDirect: 0
+ m_ExtractAmbientOcclusion: 0
m_Padding: 2
m_LightmapParameters: {fileID: 0}
m_LightmapsBakeMode: 1
@@ -76,10 +77,16 @@ LightmapSettings:
m_PVRDirectSampleCount: 32
m_PVRSampleCount: 500
m_PVRBounces: 2
+ m_PVREnvironmentSampleCount: 500
+ m_PVREnvironmentReferencePointCount: 2048
+ m_PVRFilteringMode: 2
+ m_PVRDenoiserTypeDirect: 0
+ m_PVRDenoiserTypeIndirect: 0
+ m_PVRDenoiserTypeAO: 0
m_PVRFilterTypeDirect: 0
m_PVRFilterTypeIndirect: 0
m_PVRFilterTypeAO: 0
- m_PVRFilteringMode: 1
+ m_PVREnvironmentMIS: 0
m_PVRCulling: 1
m_PVRFilteringGaussRadiusDirect: 1
m_PVRFilteringGaussRadiusIndirect: 5
@@ -87,9 +94,12 @@ LightmapSettings:
m_PVRFilteringAtrousPositionSigmaDirect: 0.5
m_PVRFilteringAtrousPositionSigmaIndirect: 2
m_PVRFilteringAtrousPositionSigmaAO: 1
- m_ShowResolutionOverlay: 1
+ m_ExportTrainingData: 0
+ m_TrainingDataDestination: TrainingData
+ m_LightProbeSampleCountMultiplier: 4
m_LightingDataAsset: {fileID: 0}
- m_UseShadowmask: 1
+ m_LightingSettings: {fileID: 4890085278179872738, guid: 7480a805600d24847ab9e9df1dc971fe,
+ type: 2}
--- !u!196 &4
NavMeshSettings:
serializedVersion: 2
@@ -109,6 +119,8 @@ NavMeshSettings:
manualTileSize: 0
tileSize: 256
accuratePlacement: 0
+ maxJobWorkers: 0
+ preserveTilesOutsideBounds: 0
debug:
m_Flags: 0
m_NavMeshData: {fileID: 0}
@@ -125,92 +137,92 @@ PrefabInstance:
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_LocalPosition.x
+ propertyPath: m_Pivot.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_LocalPosition.y
+ propertyPath: m_Pivot.y
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_LocalPosition.z
- value: 0
+ propertyPath: m_RootOrder
+ value: 1
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_LocalRotation.x
+ propertyPath: m_AnchorMax.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_LocalRotation.y
+ propertyPath: m_AnchorMax.y
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_LocalRotation.z
+ propertyPath: m_AnchorMin.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_LocalRotation.w
- value: 1
+ propertyPath: m_AnchorMin.y
+ value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_RootOrder
- value: 1
+ propertyPath: m_SizeDelta.x
+ value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_AnchoredPosition.x
+ propertyPath: m_SizeDelta.y
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_AnchoredPosition.y
+ propertyPath: m_LocalPosition.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_SizeDelta.x
+ propertyPath: m_LocalPosition.y
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_SizeDelta.y
+ propertyPath: m_LocalPosition.z
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_AnchorMin.x
- value: 0
+ propertyPath: m_LocalRotation.w
+ value: 1
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_AnchorMin.y
+ propertyPath: m_LocalRotation.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_AnchorMax.x
+ propertyPath: m_LocalRotation.y
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_AnchorMax.y
+ propertyPath: m_LocalRotation.z
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_Pivot.x
+ propertyPath: m_AnchoredPosition.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 224194346362733190, guid: 3ce107b4a79bc4eef83afde434932a68,
type: 3}
- propertyPath: m_Pivot.y
+ propertyPath: m_AnchoredPosition.y
value: 0
objectReference: {fileID: 0}
m_RemovedComponents: []
@@ -226,6 +238,14 @@ PrefabInstance:
propertyPath: m_Name
value: Visual3DBall (2)
objectReference: {fileID: 0}
+ - target: {fileID: 1321468028730240, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_IsActive
+ value: 1
+ objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_RootOrder
+ value: 7
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalPosition.x
value: 40
@@ -238,6 +258,10 @@ PrefabInstance:
propertyPath: m_LocalPosition.z
value: 5
objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_LocalRotation.w
+ value: 1
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalRotation.x
value: 0
@@ -250,14 +274,6 @@ PrefabInstance:
propertyPath: m_LocalRotation.z
value: 0
objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_LocalRotation.w
- value: 1
- objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_RootOrder
- value: 7
- objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
@@ -283,6 +299,14 @@ PrefabInstance:
propertyPath: m_Name
value: Visual3DBall (7)
objectReference: {fileID: 0}
+ - target: {fileID: 1321468028730240, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_IsActive
+ value: 1
+ objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_RootOrder
+ value: 12
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalPosition.x
value: 60
@@ -295,6 +319,10 @@ PrefabInstance:
propertyPath: m_LocalPosition.z
value: 5
objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_LocalRotation.w
+ value: 1
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalRotation.x
value: 0
@@ -307,14 +335,6 @@ PrefabInstance:
propertyPath: m_LocalRotation.z
value: 0
objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_LocalRotation.w
- value: 1
- objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_RootOrder
- value: 12
- objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
@@ -340,6 +360,14 @@ PrefabInstance:
propertyPath: m_Name
value: Visual3DBall (1)
objectReference: {fileID: 0}
+ - target: {fileID: 1321468028730240, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_IsActive
+ value: 1
+ objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_RootOrder
+ value: 6
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalPosition.x
value: 20
@@ -352,6 +380,10 @@ PrefabInstance:
propertyPath: m_LocalPosition.z
value: 5
objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_LocalRotation.w
+ value: 1
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalRotation.x
value: 0
@@ -364,14 +396,6 @@ PrefabInstance:
propertyPath: m_LocalRotation.z
value: 0
objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_LocalRotation.w
- value: 1
- objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_RootOrder
- value: 6
- objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
@@ -397,6 +421,14 @@ PrefabInstance:
propertyPath: m_Name
value: Visual3DBall (5)
objectReference: {fileID: 0}
+ - target: {fileID: 1321468028730240, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_IsActive
+ value: 1
+ objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_RootOrder
+ value: 10
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalPosition.x
value: 20
@@ -409,6 +441,10 @@ PrefabInstance:
propertyPath: m_LocalPosition.z
value: 5
objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_LocalRotation.w
+ value: 1
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalRotation.x
value: 0
@@ -421,14 +457,6 @@ PrefabInstance:
propertyPath: m_LocalRotation.z
value: 0
objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_LocalRotation.w
- value: 1
- objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_RootOrder
- value: 10
- objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
@@ -454,6 +482,14 @@ PrefabInstance:
propertyPath: m_Name
value: Visual3DBall (4)
objectReference: {fileID: 0}
+ - target: {fileID: 1321468028730240, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_IsActive
+ value: 1
+ objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_RootOrder
+ value: 9
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalPosition.x
value: 0
@@ -466,6 +502,10 @@ PrefabInstance:
propertyPath: m_LocalPosition.z
value: 5
objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_LocalRotation.w
+ value: 1
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalRotation.x
value: 0
@@ -478,14 +518,6 @@ PrefabInstance:
propertyPath: m_LocalRotation.z
value: 0
objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_LocalRotation.w
- value: 1
- objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_RootOrder
- value: 9
- objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
@@ -529,6 +561,7 @@ Transform:
m_LocalRotation: {x: -0.069583125, y: 0.0049145464, z: 0.0702813, w: 0.99508524}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 5, y: 0.19999993, z: 5}
+ m_ConstrainProportionsScale: 0
m_Children: []
m_Father: {fileID: 0}
m_RootOrder: 4
@@ -578,9 +611,10 @@ MonoBehaviour:
m_GameObject: {fileID: 1746325439}
m_Enabled: 1
m_EditorHideFlags: 0
- m_Script: {fileID: 1077351063, guid: f5f67c52d1564df4a8936ccd202a3bd8, type: 3}
+ m_Script: {fileID: 11500000, guid: 4f231c4fb786f3946a6b90b886c48677, type: 3}
m_Name:
m_EditorClassIdentifier:
+ m_SendPointerHoverToParent: 1
m_HorizontalAxis: Horizontal
m_VerticalAxis: Vertical
m_SubmitButton: Submit
@@ -597,7 +631,7 @@ MonoBehaviour:
m_GameObject: {fileID: 1746325439}
m_Enabled: 1
m_EditorHideFlags: 0
- m_Script: {fileID: -619905303, guid: f5f67c52d1564df4a8936ccd202a3bd8, type: 3}
+ m_Script: {fileID: 11500000, guid: 76c392e42b5098c458856cdf6ecaaaa1, type: 3}
m_Name:
m_EditorClassIdentifier:
m_FirstSelected: {fileID: 0}
@@ -613,6 +647,7 @@ Transform:
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
+ m_ConstrainProportionsScale: 0
m_Children: []
m_Father: {fileID: 0}
m_RootOrder: 3
@@ -628,6 +663,14 @@ PrefabInstance:
propertyPath: m_Name
value: Visual3DBall (6)
objectReference: {fileID: 0}
+ - target: {fileID: 1321468028730240, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_IsActive
+ value: 1
+ objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_RootOrder
+ value: 11
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalPosition.x
value: 40
@@ -640,6 +683,10 @@ PrefabInstance:
propertyPath: m_LocalPosition.z
value: 5
objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_LocalRotation.w
+ value: 1
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalRotation.x
value: 0
@@ -652,14 +699,6 @@ PrefabInstance:
propertyPath: m_LocalRotation.z
value: 0
objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_LocalRotation.w
- value: 1
- objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_RootOrder
- value: 11
- objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
@@ -681,6 +720,10 @@ PrefabInstance:
m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
+ - target: {fileID: 4943719350691982, guid: 5889392e3f05b448a8a06c5def6c2dec, type: 3}
+ propertyPath: m_RootOrder
+ value: 2
+ objectReference: {fileID: 0}
- target: {fileID: 4943719350691982, guid: 5889392e3f05b448a8a06c5def6c2dec, type: 3}
propertyPath: m_LocalPosition.x
value: 0
@@ -693,6 +736,10 @@ PrefabInstance:
propertyPath: m_LocalPosition.z
value: 0
objectReference: {fileID: 0}
+ - target: {fileID: 4943719350691982, guid: 5889392e3f05b448a8a06c5def6c2dec, type: 3}
+ propertyPath: m_LocalRotation.w
+ value: 0.8681629
+ objectReference: {fileID: 0}
- target: {fileID: 4943719350691982, guid: 5889392e3f05b448a8a06c5def6c2dec, type: 3}
propertyPath: m_LocalRotation.x
value: 0.31598538
@@ -705,14 +752,6 @@ PrefabInstance:
propertyPath: m_LocalRotation.z
value: 0.13088542
objectReference: {fileID: 0}
- - target: {fileID: 4943719350691982, guid: 5889392e3f05b448a8a06c5def6c2dec, type: 3}
- propertyPath: m_LocalRotation.w
- value: 0.8681629
- objectReference: {fileID: 0}
- - target: {fileID: 4943719350691982, guid: 5889392e3f05b448a8a06c5def6c2dec, type: 3}
- propertyPath: m_RootOrder
- value: 2
- objectReference: {fileID: 0}
- target: {fileID: 4943719350691982, guid: 5889392e3f05b448a8a06c5def6c2dec, type: 3}
propertyPath: m_LocalEulerAnglesHint.y
value: -45
@@ -730,6 +769,14 @@ PrefabInstance:
propertyPath: m_Name
value: Visual3DBall (3)
objectReference: {fileID: 0}
+ - target: {fileID: 1321468028730240, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_IsActive
+ value: 1
+ objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_RootOrder
+ value: 8
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalPosition.x
value: 60
@@ -742,6 +789,10 @@ PrefabInstance:
propertyPath: m_LocalPosition.z
value: 5
objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_LocalRotation.w
+ value: 1
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalRotation.x
value: 0
@@ -754,14 +805,6 @@ PrefabInstance:
propertyPath: m_LocalRotation.z
value: 0
objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_LocalRotation.w
- value: 1
- objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_RootOrder
- value: 8
- objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
@@ -805,9 +848,10 @@ Camera:
m_ClearFlags: 2
m_BackGroundColor: {r: 0.46666667, g: 0.5647059, b: 0.60784316, a: 1}
m_projectionMatrixMode: 1
+ m_GateFitMode: 2
+ m_FOVAxisMode: 0
m_SensorSize: {x: 36, y: 24}
m_LensShift: {x: 0, y: 0}
- m_GateFitMode: 2
m_FocalLength: 50
m_NormalizedViewPortRect:
serializedVersion: 2
@@ -845,6 +889,7 @@ Transform:
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 28.99, y: 14.09, z: -40.6}
m_LocalScale: {x: 1, y: 1, z: 1}
+ m_ConstrainProportionsScale: 0
m_Children: []
m_Father: {fileID: 0}
m_RootOrder: 0
@@ -860,6 +905,10 @@ PrefabInstance:
propertyPath: m_Name
value: Visual3DBall
objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_RootOrder
+ value: 5
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalPosition.x
value: 0
@@ -872,6 +921,10 @@ PrefabInstance:
propertyPath: m_LocalPosition.z
value: 5
objectReference: {fileID: 0}
+ - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
+ propertyPath: m_LocalRotation.w
+ value: 1
+ objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalRotation.x
value: 0
@@ -884,14 +937,6 @@ PrefabInstance:
propertyPath: m_LocalRotation.z
value: 0
objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_LocalRotation.w
- value: 1
- objectReference: {fileID: 0}
- - target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
- propertyPath: m_RootOrder
- value: 5
- objectReference: {fileID: 0}
- target: {fileID: 4679453577574622, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
@@ -904,5 +949,10 @@ PrefabInstance:
propertyPath: m_LocalEulerAnglesHint.z
value: 0
objectReference: {fileID: 0}
+ - target: {fileID: 7705253412956426214, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6,
+ type: 3}
+ propertyPath: MaxStep
+ value: 500
+ objectReference: {fileID: 0}
m_RemovedComponents: []
m_SourcePrefab: {fileID: 100100000, guid: ec49a7b8b70a24ab48d7ca0bf5a063a6, type: 3}
diff --git a/Project/Assets/ML-Agents/Examples/Pyramids/Prefabs/AreaPB.prefab b/Project/Assets/ML-Agents/Examples/Pyramids/Prefabs/AreaPB.prefab
index e665a66219..3fc47a925a 100644
--- a/Project/Assets/ML-Agents/Examples/Pyramids/Prefabs/AreaPB.prefab
+++ b/Project/Assets/ML-Agents/Examples/Pyramids/Prefabs/AreaPB.prefab
@@ -1509,7 +1509,7 @@ GameObject:
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- - component: {fileID: 4661241202043188}
+ - component: {fileID: 466124120213188}
- component: {fileID: 33091290307090862}
- component: {fileID: 23221028280036978}
- component: {fileID: 65040597978940982}
@@ -1520,7 +1520,7 @@ GameObject:
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
---- !u!4 &4661241202043188
+--- !u!4 &466124120213188
Transform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
@@ -1623,7 +1623,7 @@ Transform:
- {fileID: 4463282243387382}
- {fileID: 4087387112911168}
- {fileID: 4248851000827934}
- - {fileID: 4661241202043188}
+ - {fileID: 466124120213188}
- {fileID: 4676865447046996}
- {fileID: 4404525033858484}
- {fileID: 4912732971874350}
diff --git a/Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/U_Logo_White_RGB.png b/Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/U_Logo_White_RGB.png
new file mode 100644
index 0000000000..385a5558fa
Binary files /dev/null and b/Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/U_Logo_White_RGB.png differ
diff --git a/Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/UnityLogo.png.meta b/Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/U_Logo_White_RGB.png.meta
similarity index 71%
rename from Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/UnityLogo.png.meta
rename to Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/U_Logo_White_RGB.png.meta
index ab0294dd90..c339b6cd70 100644
--- a/Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/UnityLogo.png.meta
+++ b/Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/U_Logo_White_RGB.png.meta
@@ -1,9 +1,9 @@
fileFormatVersion: 2
-guid: 2e85738fe64714cffbf72f0f11de6307
+guid: ff9a4fb150ec44c1dae2f2c249a05286
TextureImporter:
- fileIDToRecycleName: {}
+ internalIDToNameTable: []
externalObjects: {}
- serializedVersion: 9
+ serializedVersion: 11
mipmaps:
mipMapMode: 0
enableMipMap: 0
@@ -23,6 +23,7 @@ TextureImporter:
isReadable: 0
streamingMipmaps: 0
streamingMipmapsPriority: 0
+ vTOnly: 0
grayScaleToAlpha: 0
generateCubemap: 6
cubemapConvolution: 0
@@ -31,12 +32,12 @@ TextureImporter:
maxTextureSize: 2048
textureSettings:
serializedVersion: 2
- filterMode: -1
- aniso: -1
- mipBias: -100
+ filterMode: 1
+ aniso: 1
+ mipBias: 0
wrapU: 1
wrapV: 1
- wrapW: -1
+ wrapW: 0
nPOTScale: 0
lightmap: 0
compressionQuality: 50
@@ -54,11 +55,15 @@ TextureImporter:
textureType: 8
textureShape: 1
singleChannelComponent: 0
+ flipbookRows: 1
+ flipbookColumns: 1
maxTextureSizeSet: 0
compressionQualitySet: 0
textureFormatSet: 0
+ ignorePngGamma: 0
+ applyGammaDecoding: 0
platformSettings:
- - serializedVersion: 2
+ - serializedVersion: 3
buildTarget: DefaultTexturePlatform
maxTextureSize: 2048
resizeAlgorithm: 0
@@ -69,7 +74,8 @@ TextureImporter:
allowsAlphaSplitting: 0
overridden: 0
androidETC2FallbackOverride: 0
- - serializedVersion: 2
+ forceMaximumCompressionQuality_BC6H_BC7: 0
+ - serializedVersion: 3
buildTarget: Standalone
maxTextureSize: 2048
resizeAlgorithm: 0
@@ -80,39 +86,20 @@ TextureImporter:
allowsAlphaSplitting: 0
overridden: 0
androidETC2FallbackOverride: 0
- - serializedVersion: 2
- buildTarget: iPhone
- maxTextureSize: 2048
- resizeAlgorithm: 0
- textureFormat: -1
- textureCompression: 1
- compressionQuality: 50
- crunchedCompression: 0
- allowsAlphaSplitting: 0
- overridden: 0
- androidETC2FallbackOverride: 0
- - serializedVersion: 2
- buildTarget: Android
- maxTextureSize: 2048
- resizeAlgorithm: 0
- textureFormat: -1
- textureCompression: 1
- compressionQuality: 50
- crunchedCompression: 0
- allowsAlphaSplitting: 0
- overridden: 0
- androidETC2FallbackOverride: 0
+ forceMaximumCompressionQuality_BC6H_BC7: 0
spriteSheet:
serializedVersion: 2
sprites: []
outline: []
physicsShape: []
bones: []
- spriteID: b33f877521c6d4d8782f018141dc1d6a
+ spriteID: 5e97eb03825dee720800000000000000
+ internalID: 0
vertices: []
indices:
edges: []
weights: []
+ secondaryTextures: []
spritePackingTag:
pSDRemoveMatte: 0
pSDShowRemoveMatteOption: 0
diff --git a/Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/UnityLogo.png b/Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/UnityLogo.png
deleted file mode 100644
index 7a955ee8ea..0000000000
Binary files a/Project/Assets/ML-Agents/Examples/SharedAssets/Materials/Textures/UnityLogo.png and /dev/null differ
diff --git a/Project/Assets/ML-Agents/Examples/SharedAssets/Prefabs/Canvas_Watermark.prefab b/Project/Assets/ML-Agents/Examples/SharedAssets/Prefabs/Canvas_Watermark.prefab
index 9dcc81245d..078274d921 100644
--- a/Project/Assets/ML-Agents/Examples/SharedAssets/Prefabs/Canvas_Watermark.prefab
+++ b/Project/Assets/ML-Agents/Examples/SharedAssets/Prefabs/Canvas_Watermark.prefab
@@ -1,22 +1,12 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
---- !u!1001 &100100000
-Prefab:
- m_ObjectHideFlags: 1
- serializedVersion: 2
- m_Modification:
- m_TransformParent: {fileID: 0}
- m_Modifications: []
- m_RemovedComponents: []
- m_ParentPrefab: {fileID: 0}
- m_RootGameObject: {fileID: 1537641056927260}
- m_IsPrefabParent: 1
--- !u!1 &1508578353888260
GameObject:
m_ObjectHideFlags: 0
- m_PrefabParentObject: {fileID: 0}
- m_PrefabInternal: {fileID: 100100000}
- serializedVersion: 5
+ m_CorrespondingSourceObject: {fileID: 0}
+ m_PrefabInstance: {fileID: 0}
+ m_PrefabAsset: {fileID: 0}
+ serializedVersion: 6
m_Component:
- component: {fileID: 224796324260922368}
- component: {fileID: 222875034646499690}
@@ -28,76 +18,133 @@ GameObject:
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
---- !u!1 &1537641056927260
-GameObject:
+--- !u!224 &224796324260922368
+RectTransform:
m_ObjectHideFlags: 0
- m_PrefabParentObject: {fileID: 0}
- m_PrefabInternal: {fileID: 100100000}
- serializedVersion: 5
- m_Component:
- - component: {fileID: 224194346362733190}
- - component: {fileID: 223703725700644330}
- - component: {fileID: 114816648722094340}
- - component: {fileID: 114595077744033850}
- m_Layer: 5
- m_Name: Canvas_Watermark
- m_TagString: Untagged
- m_Icon: {fileID: 0}
- m_NavMeshLayer: 0
- m_StaticEditorFlags: 0
- m_IsActive: 1
+ m_CorrespondingSourceObject: {fileID: 0}
+ m_PrefabInstance: {fileID: 0}
+ m_PrefabAsset: {fileID: 0}
+ m_GameObject: {fileID: 1508578353888260}
+ m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
+ m_LocalPosition: {x: 0, y: 0, z: 0}
+ m_LocalScale: {x: 0.3300893, y: 0.3300892, z: 0.3300892}
+ m_Children: []
+ m_Father: {fileID: 224194346362733190}
+ m_RootOrder: 0
+ m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
+ m_AnchorMin: {x: 1, y: 1}
+ m_AnchorMax: {x: 1, y: 1}
+ m_AnchoredPosition: {x: -209, y: -116}
+ m_SizeDelta: {x: 715.7, y: 715.69995}
+ m_Pivot: {x: 0.5, y: 0.5}
+--- !u!222 &222875034646499690
+CanvasRenderer:
+ m_ObjectHideFlags: 0
+ m_CorrespondingSourceObject: {fileID: 0}
+ m_PrefabInstance: {fileID: 0}
+ m_PrefabAsset: {fileID: 0}
+ m_GameObject: {fileID: 1508578353888260}
+ m_CullTransparentMesh: 1
--- !u!114 &114223610671736162
MonoBehaviour:
- m_ObjectHideFlags: 1
- m_PrefabParentObject: {fileID: 0}
- m_PrefabInternal: {fileID: 100100000}
+ m_ObjectHideFlags: 0
+ m_CorrespondingSourceObject: {fileID: 0}
+ m_PrefabInstance: {fileID: 0}
+ m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1508578353888260}
m_Enabled: 1
m_EditorHideFlags: 0
- m_Script: {fileID: -765806418, guid: f70555f144d8491a825f0804e09c671c, type: 3}
+ m_Script: {fileID: 11500000, guid: fe87c0e1cc204ed48ad3b37840f39efc, type: 3}
m_Name:
m_EditorClassIdentifier:
m_Material: {fileID: 0}
m_Color: {r: 1, g: 1, b: 1, a: 1}
m_RaycastTarget: 1
+ m_RaycastPadding: {x: 0, y: 0, z: 0, w: 0}
+ m_Maskable: 1
m_OnCullStateChanged:
m_PersistentCalls:
m_Calls: []
- m_TypeName: UnityEngine.UI.MaskableGraphic+CullStateChangedEvent, UnityEngine.UI,
- Version=1.0.0.0, Culture=neutral, PublicKeyToken=null
- m_Sprite: {fileID: 21300000, guid: 2e85738fe64714cffbf72f0f11de6307, type: 3}
+ m_Sprite: {fileID: 21300000, guid: ff9a4fb150ec44c1dae2f2c249a05286, type: 3}
m_Type: 0
- m_PreserveAspect: 0
+ m_PreserveAspect: 1
m_FillCenter: 1
m_FillMethod: 4
m_FillAmount: 1
m_FillClockwise: 1
m_FillOrigin: 0
---- !u!114 &114595077744033850
-MonoBehaviour:
- m_ObjectHideFlags: 1
- m_PrefabParentObject: {fileID: 0}
- m_PrefabInternal: {fileID: 100100000}
+ m_UseSpriteMesh: 0
+ m_PixelsPerUnitMultiplier: 1
+--- !u!1 &1537641056927260
+GameObject:
+ m_ObjectHideFlags: 0
+ m_CorrespondingSourceObject: {fileID: 0}
+ m_PrefabInstance: {fileID: 0}
+ m_PrefabAsset: {fileID: 0}
+ serializedVersion: 6
+ m_Component:
+ - component: {fileID: 224194346362733190}
+ - component: {fileID: 223703725700644330}
+ - component: {fileID: 114816648722094340}
+ - component: {fileID: 114595077744033850}
+ m_Layer: 5
+ m_Name: Canvas_Watermark
+ m_TagString: Untagged
+ m_Icon: {fileID: 0}
+ m_NavMeshLayer: 0
+ m_StaticEditorFlags: 0
+ m_IsActive: 1
+--- !u!224 &224194346362733190
+RectTransform:
+ m_ObjectHideFlags: 0
+ m_CorrespondingSourceObject: {fileID: 0}
+ m_PrefabInstance: {fileID: 0}
+ m_PrefabAsset: {fileID: 0}
+ m_GameObject: {fileID: 1537641056927260}
+ m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
+ m_LocalPosition: {x: 0, y: 0, z: 0}
+ m_LocalScale: {x: 0, y: 0, z: 0}
+ m_Children:
+ - {fileID: 224796324260922368}
+ m_Father: {fileID: 0}
+ m_RootOrder: 0
+ m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
+ m_AnchorMin: {x: 0, y: 0}
+ m_AnchorMax: {x: 0, y: 0}
+ m_AnchoredPosition: {x: 0, y: 0}
+ m_SizeDelta: {x: 0, y: 0}
+ m_Pivot: {x: 0, y: 0}
+--- !u!223 &223703725700644330
+Canvas:
+ m_ObjectHideFlags: 0
+ m_CorrespondingSourceObject: {fileID: 0}
+ m_PrefabInstance: {fileID: 0}
+ m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1537641056927260}
m_Enabled: 1
- m_EditorHideFlags: 0
- m_Script: {fileID: 1301386320, guid: f70555f144d8491a825f0804e09c671c, type: 3}
- m_Name:
- m_EditorClassIdentifier:
- m_IgnoreReversedGraphics: 1
- m_BlockingObjects: 0
- m_BlockingMask:
- serializedVersion: 2
- m_Bits: 4294967295
+ serializedVersion: 3
+ m_RenderMode: 0
+ m_Camera: {fileID: 0}
+ m_PlaneDistance: 100
+ m_PixelPerfect: 0
+ m_ReceivesEvents: 1
+ m_OverrideSorting: 0
+ m_OverridePixelPerfect: 0
+ m_SortingBucketNormalizedSize: 0
+ m_AdditionalShaderChannelsFlag: 0
+ m_SortingLayerID: 0
+ m_SortingOrder: 0
+ m_TargetDisplay: 0
--- !u!114 &114816648722094340
MonoBehaviour:
- m_ObjectHideFlags: 1
- m_PrefabParentObject: {fileID: 0}
- m_PrefabInternal: {fileID: 100100000}
+ m_ObjectHideFlags: 0
+ m_CorrespondingSourceObject: {fileID: 0}
+ m_PrefabInstance: {fileID: 0}
+ m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1537641056927260}
m_Enabled: 1
m_EditorHideFlags: 0
- m_Script: {fileID: 1980459831, guid: f70555f144d8491a825f0804e09c671c, type: 3}
+ m_Script: {fileID: 11500000, guid: 0cd44c1031e13a943bb63640046fad76, type: 3}
m_Name:
m_EditorClassIdentifier:
m_UiScaleMode: 1
@@ -110,66 +157,21 @@ MonoBehaviour:
m_FallbackScreenDPI: 96
m_DefaultSpriteDPI: 96
m_DynamicPixelsPerUnit: 1
---- !u!222 &222875034646499690
-CanvasRenderer:
- m_ObjectHideFlags: 1
- m_PrefabParentObject: {fileID: 0}
- m_PrefabInternal: {fileID: 100100000}
- m_GameObject: {fileID: 1508578353888260}
---- !u!223 &223703725700644330
-Canvas:
- m_ObjectHideFlags: 1
- m_PrefabParentObject: {fileID: 0}
- m_PrefabInternal: {fileID: 100100000}
+ m_PresetInfoIsWorld: 0
+--- !u!114 &114595077744033850
+MonoBehaviour:
+ m_ObjectHideFlags: 0
+ m_CorrespondingSourceObject: {fileID: 0}
+ m_PrefabInstance: {fileID: 0}
+ m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1537641056927260}
m_Enabled: 1
- serializedVersion: 3
- m_RenderMode: 0
- m_Camera: {fileID: 0}
- m_PlaneDistance: 100
- m_PixelPerfect: 0
- m_ReceivesEvents: 1
- m_OverrideSorting: 0
- m_OverridePixelPerfect: 0
- m_SortingBucketNormalizedSize: 0
- m_AdditionalShaderChannelsFlag: 0
- m_SortingLayerID: 0
- m_SortingOrder: 0
- m_TargetDisplay: 0
---- !u!224 &224194346362733190
-RectTransform:
- m_ObjectHideFlags: 1
- m_PrefabParentObject: {fileID: 0}
- m_PrefabInternal: {fileID: 100100000}
- m_GameObject: {fileID: 1537641056927260}
- m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
- m_LocalPosition: {x: 0, y: 0, z: 0}
- m_LocalScale: {x: 0, y: 0, z: 0}
- m_Children:
- - {fileID: 224796324260922368}
- m_Father: {fileID: 0}
- m_RootOrder: 0
- m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
- m_AnchorMin: {x: 0, y: 0}
- m_AnchorMax: {x: 0, y: 0}
- m_AnchoredPosition: {x: 0, y: 0}
- m_SizeDelta: {x: 0, y: 0}
- m_Pivot: {x: 0, y: 0}
---- !u!224 &224796324260922368
-RectTransform:
- m_ObjectHideFlags: 1
- m_PrefabParentObject: {fileID: 0}
- m_PrefabInternal: {fileID: 100100000}
- m_GameObject: {fileID: 1508578353888260}
- m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
- m_LocalPosition: {x: 0, y: 0, z: 0}
- m_LocalScale: {x: 0.5588671, y: 0.558867, z: 0.558867}
- m_Children: []
- m_Father: {fileID: 224194346362733190}
- m_RootOrder: 0
- m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
- m_AnchorMin: {x: 1, y: 1}
- m_AnchorMax: {x: 1, y: 1}
- m_AnchoredPosition: {x: -209, y: -116}
- m_SizeDelta: {x: 715.7, y: 715.69995}
- m_Pivot: {x: 0.5, y: 0.5}
+ m_EditorHideFlags: 0
+ m_Script: {fileID: 11500000, guid: dc42784cf147c0c48a680349fa168899, type: 3}
+ m_Name:
+ m_EditorClassIdentifier:
+ m_IgnoreReversedGraphics: 1
+ m_BlockingObjects: 0
+ m_BlockingMask:
+ serializedVersion: 2
+ m_Bits: 4294967295
diff --git a/Project/Packages/manifest.json b/Project/Packages/manifest.json
index ea720f3454..7395e3d6c9 100644
--- a/Project/Packages/manifest.json
+++ b/Project/Packages/manifest.json
@@ -6,7 +6,7 @@
"com.unity.ml-agents.extensions": "file:../../com.unity.ml-agents.extensions",
"com.unity.nuget.newtonsoft-json": "2.0.0",
"com.unity.test-framework": "1.1.29",
- "com.unity.toolchain.macos-x86_64-linux-x86_64": "0.1.20-preview",
+ "com.unity.toolchain.macos-x86_64-linux-x86_64": "2.0.3",
"com.unity.ugui": "1.0.0",
"com.unity.modules.imageconversion": "1.0.0",
"com.unity.modules.jsonserialize": "1.0.0",
diff --git a/Project/Packages/packages-lock.json b/Project/Packages/packages-lock.json
index 7d765c0063..b92d60ef54 100644
--- a/Project/Packages/packages-lock.json
+++ b/Project/Packages/packages-lock.json
@@ -1,7 +1,7 @@
{
"dependencies": {
"com.unity.barracuda": {
- "version": "2.3.1-preview",
+ "version": "3.0.0",
"depth": 1,
"source": "registry",
"dependencies": {
@@ -12,7 +12,7 @@
"url": "https://packages.unity.com"
},
"com.unity.burst": {
- "version": "1.6.0",
+ "version": "1.6.6",
"depth": 2,
"source": "registry",
"dependencies": {
@@ -44,7 +44,7 @@
"url": "https://packages.unity.com"
},
"com.unity.mathematics": {
- "version": "1.2.1",
+ "version": "1.2.6",
"depth": 3,
"source": "registry",
"dependencies": {},
@@ -55,7 +55,7 @@
"depth": 0,
"source": "local",
"dependencies": {
- "com.unity.barracuda": "2.3.1-preview",
+ "com.unity.barracuda": "3.0.0",
"com.unity.modules.imageconversion": "1.0.0",
"com.unity.modules.jsonserialize": "1.0.0"
}
@@ -77,18 +77,18 @@
"url": "https://packages.unity.com"
},
"com.unity.sysroot": {
- "version": "0.1.19-preview",
+ "version": "2.0.4",
"depth": 1,
"source": "registry",
"dependencies": {},
"url": "https://packages.unity.com"
},
"com.unity.sysroot.linux-x86_64": {
- "version": "0.1.14-preview",
+ "version": "2.0.3",
"depth": 1,
"source": "registry",
"dependencies": {
- "com.unity.sysroot": "0.1.18-preview"
+ "com.unity.sysroot": "2.0.4"
},
"url": "https://packages.unity.com"
},
@@ -104,12 +104,12 @@
"url": "https://packages.unity.com"
},
"com.unity.toolchain.macos-x86_64-linux-x86_64": {
- "version": "0.1.20-preview",
+ "version": "2.0.3",
"depth": 0,
"source": "registry",
"dependencies": {
- "com.unity.sysroot": "0.1.19-preview",
- "com.unity.sysroot.linux-x86_64": "0.1.14-preview"
+ "com.unity.sysroot": "2.0.4",
+ "com.unity.sysroot.linux-x86_64": "2.0.3"
},
"url": "https://packages.unity.com"
},
diff --git a/Project/ProjectSettings/ProjectSettings.asset b/Project/ProjectSettings/ProjectSettings.asset
index 5895e58e3d..0685899de9 100644
--- a/Project/ProjectSettings/ProjectSettings.asset
+++ b/Project/ProjectSettings/ProjectSettings.asset
@@ -3,7 +3,7 @@
--- !u!129 &1
PlayerSettings:
m_ObjectHideFlags: 0
- serializedVersion: 22
+ serializedVersion: 23
productGUID: cd7e9a0e0d1d14312ad9e89757262f3b
AndroidProfiler: 0
AndroidFilterTouchesWhenObscured: 0
@@ -145,23 +145,25 @@ PlayerSettings:
enable360StereoCapture: 0
isWsaHolographicRemotingEnabled: 0
enableFrameTimingStats: 0
+ enableOpenGLProfilerGPURecorders: 1
useHDRDisplay: 0
D3DHDRBitDepth: 0
m_ColorGamuts: 00000000
targetPixelDensity: 30
resolutionScalingMode: 0
+ resetResolutionOnWindowResize: 0
androidSupportedAspectRatio: 1
androidMaxAspectRatio: 2.1
applicationIdentifier:
Android: com.Company.ProductName
- Standalone: com.UnityTechnologies.UnityEnvironment
+ Standalone: com.Unity-Technologies.UnityEnvironment
buildNumber:
Standalone: 0
iPhone: 0
tvOS: 0
overrideDefaultApplicationIdentifier: 0
AndroidBundleVersionCode: 1
- AndroidMinSdkVersion: 19
+ AndroidMinSdkVersion: 22
AndroidTargetSdkVersion: 0
AndroidPreferredInstallLocation: 1
aotOptions: nimt-trampolines=1024
@@ -217,6 +219,7 @@ PlayerSettings:
iOSLaunchScreeniPadCustomStoryboardPath:
iOSDeviceRequirements: []
iOSURLSchemes: []
+ macOSURLSchemes: []
iOSBackgroundModes: 0
iOSMetalForceHardShadows: 0
metalEditorSupport: 1
@@ -311,6 +314,9 @@ PlayerSettings:
- m_BuildTarget: iOSSupport
m_APIs: 10000000
m_Automatic: 1
+ - m_BuildTarget: AndroidPlayer
+ m_APIs: 0b00000008000000
+ m_Automatic: 0
m_BuildTargetVRSettings: []
openGLRequireES31: 0
openGLRequireES31AEP: 0
@@ -329,6 +335,7 @@ PlayerSettings:
m_EncodingQuality: 1
m_BuildTargetGroupLightmapSettings: []
m_BuildTargetNormalMapEncoding: []
+ m_BuildTargetDefaultTextureCompressionFormat: []
playModeTestRunnerEnabled: 0
runPlayModeTestAsEditModeTest: 0
actionOnDotNetUnhandledException: 1
@@ -347,6 +354,7 @@ PlayerSettings:
switchScreenResolutionBehavior: 2
switchUseCPUProfiler: 0
switchUseGOLDLinker: 0
+ switchLTOSetting: 0
switchApplicationID: 0x0005000C10000001
switchNSODependencies:
switchTitleNames_0:
@@ -477,7 +485,9 @@ PlayerSettings:
switchPlayerConnectionEnabled: 1
switchUseNewStyleFilepaths: 0
switchUseMicroSleepForYield: 1
+ switchEnableRamDiskSupport: 0
switchMicroSleepForYieldTime: 25
+ switchRamDiskSpaceSize: 12
ps4NPAgeRating: 12
ps4NPTitleSecret:
ps4NPTrophyPackPath:
@@ -574,18 +584,15 @@ PlayerSettings:
webGLThreadsSupport: 0
webGLDecompressionFallback: 0
scriptingDefineSymbols:
- 1:
- 7: UNITY_POST_PROCESSING_STACK_V2
- 13: UNITY_POST_PROCESSING_STACK_V2
- 14: UNITY_POST_PROCESSING_STACK_V2
- 17: UNITY_POST_PROCESSING_STACK_V2
- 18: UNITY_POST_PROCESSING_STACK_V2
- 19: UNITY_POST_PROCESSING_STACK_V2
- 21: UNITY_POST_PROCESSING_STACK_V2
- 23: UNITY_POST_PROCESSING_STACK_V2
- 25: UNITY_POST_PROCESSING_STACK_V2
- 26: UNITY_POST_PROCESSING_STACK_V2
- 27: UNITY_POST_PROCESSING_STACK_V2
+ : UNITY_POST_PROCESSING_STACK_V2
+ Android: UNITY_POST_PROCESSING_STACK_V2
+ Nintendo Switch: UNITY_POST_PROCESSING_STACK_V2
+ PS4: UNITY_POST_PROCESSING_STACK_V2
+ Standalone:
+ WebGL: UNITY_POST_PROCESSING_STACK_V2
+ Windows Store Apps: UNITY_POST_PROCESSING_STACK_V2
+ XboxOne: UNITY_POST_PROCESSING_STACK_V2
+ tvOS: UNITY_POST_PROCESSING_STACK_V2
additionalCompilerArguments: {}
platformArchitecture: {}
scriptingBackend: {}
@@ -595,7 +602,6 @@ PlayerSettings:
suppressCommonWarnings: 1
allowUnsafeCode: 0
useDeterministicCompilation: 1
- useReferenceAssemblies: 1
enableRoslynAnalyzers: 1
additionalIl2CppArgs:
scriptingRuntimeVersion: 1
@@ -633,6 +639,7 @@ PlayerSettings:
metroFTAName:
metroFTAFileTypes: []
metroProtocolName:
+ vcxProjDefaultLanguage:
XboxOneProductId:
XboxOneUpdateKey:
XboxOneSandboxId:
@@ -682,4 +689,6 @@ PlayerSettings:
organizationId:
cloudEnabled: 0
legacyClampBlendShapeWeights: 1
+ playerDataPath:
+ forceSRGBBlit: 1
virtualTexturingSupportEnabled: 0
diff --git a/Project/ProjectSettings/ProjectVersion.txt b/Project/ProjectSettings/ProjectVersion.txt
index 4c9401b919..8ea1b855ae 100644
--- a/Project/ProjectSettings/ProjectVersion.txt
+++ b/Project/ProjectSettings/ProjectVersion.txt
@@ -1,2 +1,2 @@
-m_EditorVersion: 2020.3.25f1
-m_EditorVersionWithRevision: 2020.3.25f1 (9b9180224418)
+m_EditorVersion: 2021.3.11f1
+m_EditorVersionWithRevision: 2021.3.11f1 (0a5ca18544bf)
diff --git a/README.md b/README.md
deleted file mode 100644
index 4227a3424d..0000000000
--- a/README.md
+++ /dev/null
@@ -1,189 +0,0 @@
-
-
-# Unity ML-Agents Toolkit
-
-[](https://github.com/Unity-Technologies/ml-agents/tree/release_19_docs/docs/)
-
-[](LICENSE.md)
-
-([latest release](https://github.com/Unity-Technologies/ml-agents/releases/tag/latest_release))
-([all releases](https://github.com/Unity-Technologies/ml-agents/releases))
-
-**The Unity Machine Learning Agents Toolkit** (ML-Agents) is an open-source
-project that enables games and simulations to serve as environments for
-training intelligent agents. We provide implementations (based on PyTorch)
-of state-of-the-art algorithms to enable game developers and hobbyists to easily
-train intelligent agents for 2D, 3D and VR/AR games. Researchers can also use the
-provided simple-to-use Python API to train Agents using reinforcement learning,
-imitation learning, neuroevolution, or any other methods. These trained agents can be
-used for multiple purposes, including controlling NPC behavior (in a variety of
-settings such as multi-agent and adversarial), automated testing of game builds
-and evaluating different game design decisions pre-release. The ML-Agents
-Toolkit is mutually beneficial for both game developers and AI researchers as it
-provides a central platform where advances in AI can be evaluated on Unity’s
-rich environments and then made accessible to the wider research and game
-developer communities.
-
-## Features
-
-- 18+ [example Unity environments](docs/Learning-Environment-Examples.md)
-- Support for multiple environment configurations and training scenarios
-- Flexible Unity SDK that can be integrated into your game or custom Unity scene
-- Support for training single-agent, multi-agent cooperative, and multi-agent
- competitive scenarios via several Deep Reinforcement Learning algorithms (PPO, SAC, MA-POCA, self-play).
-- Support for learning from demonstrations through two Imitation Learning algorithms (BC and GAIL).
-- Easily definable Curriculum Learning scenarios for complex tasks
-- Train robust agents using environment randomization
-- Flexible agent control with On Demand Decision Making
-- Train using multiple concurrent Unity environment instances
-- Utilizes the [Unity Inference Engine](docs/Unity-Inference-Engine.md) to
- provide native cross-platform support
-- Unity environment [control from Python](docs/Python-LLAPI.md)
-- Wrap Unity learning environments as a [gym](docs/Python-Gym-API.md)
-
-See our [ML-Agents Overview](docs/ML-Agents-Overview.md) page for detailed
-descriptions of all these features.
-
-## Releases & Documentation
-
-**Our latest, stable release is `Release 19`. Click
-[here](https://github.com/Unity-Technologies/ml-agents/tree/release_19_docs/docs/Readme.md)
-to get started with the latest release of ML-Agents.**
-
-The table below lists all our releases, including our `main` branch which is
-under active development and may be unstable. A few helpful guidelines:
-- The [Versioning page](docs/Versioning.md) overviews how we manage our GitHub
- releases and the versioning process for each of the ML-Agents components.
-- The [Releases page](https://github.com/Unity-Technologies/ml-agents/releases)
- contains details of the changes between releases.
-- The [Migration page](docs/Migrating.md) contains details on how to upgrade
- from earlier releases of the ML-Agents Toolkit.
-- The **Documentation** links in the table below include installation and usage
- instructions specific to each release. Remember to always use the
- documentation that corresponds to the release version you're using.
-- The `com.unity.ml-agents` package is [verified](https://docs.unity3d.com/2020.1/Documentation/Manual/pack-safe.html)
- for Unity 2020.1 and later. Verified packages releases are numbered 1.0.x.
-
-| **Version** | **Release Date** | **Source** | **Documentation** | **Download** | **Python Package** | **Unity Package** |
-|:--------------------------:|:--------------------:|:--------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------:|:-------------------------------------------------------:|:----------------------------------------------------------------------------------------:|
-| **main (unstable)** | -- | [source](https://github.com/Unity-Technologies/ml-agents/tree/main) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/main/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/main.zip) | -- | -- |
-| **Release 19** | **January 14, 2022** | **[source](https://github.com/Unity-Technologies/ml-agents/tree/release_19)** | **[docs](https://github.com/Unity-Technologies/ml-agents/tree/release_19_docs/docs/Readme.md)** | **[download](https://github.com/Unity-Technologies/ml-agents/archive/release_19.zip)** | **[0.28.0](https://pypi.org/project/mlagents/0.28.0/)** | -- |
-| **Release 18** | **June 09, 2021** | **[source](https://github.com/Unity-Technologies/ml-agents/tree/release_18)** | **[docs](https://github.com/Unity-Technologies/ml-agents/tree/release_18_docs/docs/Readme.md)** | **[download](https://github.com/Unity-Technologies/ml-agents/archive/release_18.zip)** | **[0.27.0](https://pypi.org/project/mlagents/0.27.0/)** | **[2.1.0](https://docs.unity3d.com/Packages/com.unity.ml-agents@2.1/manual/index.html)** |
-| **Verified Package 1.0.8** | **May 26, 2021** | **[source](https://github.com/Unity-Technologies/ml-agents/tree/com.unity.ml-agents_1.0.8)** | **[docs](https://github.com/Unity-Technologies/ml-agents/blob/release_2_verified_docs/docs/Readme.md)** | **[download](https://github.com/Unity-Technologies/ml-agents/archive/com.unity.ml-agents_1.0.8.zip)** | **[0.16.1](https://pypi.org/project/mlagents/0.16.1/)** | **[1.0.8](https://docs.unity3d.com/Packages/com.unity.ml-agents@1.0/manual/index.html)** |
-
-If you are a researcher interested in a discussion of Unity as an AI platform,
-see a pre-print of our
-[reference paper on Unity and the ML-Agents Toolkit](https://arxiv.org/abs/1809.02627).
-
-If you use Unity or the ML-Agents Toolkit to conduct research, we ask that you
-cite the following paper as a reference:
-
-Juliani, A., Berges, V., Teng, E., Cohen, A., Harper, J., Elion, C., Goy, C.,
-Gao, Y., Henry, H., Mattar, M., Lange, D. (2020). Unity: A General Platform for
-Intelligent Agents. _arXiv preprint
-[arXiv:1809.02627](https://arxiv.org/abs/1809.02627)._
-https://github.com/Unity-Technologies/ml-agents.
-
-## Additional Resources
-
-We have a Unity Learn course,
-[ML-Agents: Hummingbirds](https://learn.unity.com/course/ml-agents-hummingbirds),
-that provides a gentle introduction to Unity and the ML-Agents Toolkit.
-
-We've also partnered with
-[CodeMonkeyUnity](https://www.youtube.com/c/CodeMonkeyUnity) to create a
-[series of tutorial videos](https://www.youtube.com/playlist?list=PLzDRvYVwl53vehwiN_odYJkPBzcqFw110)
-on how to implement and use the ML-Agents Toolkit.
-
-We have also published a series of blog posts that are relevant for ML-Agents:
-
-- (July 12, 2021)
- [ML-Agents plays Dodgeball](https://blog.unity.com/technology/ml-agents-plays-dodgeball)
-- (May 5, 2021)
- [ML-Agents v2.0 release: Now supports training complex cooperative behaviors](https://blogs.unity3d.com/2021/05/05/ml-agents-v2-0-release-now-supports-training-complex-cooperative-behaviors/)
-- (December 28, 2020)
- [Happy holidays from the Unity ML-Agents team!](https://blogs.unity3d.com/2020/12/28/happy-holidays-from-the-unity-ml-agents-team/)
-- (November 20, 2020)
- [How Eidos-Montréal created Grid Sensors to improve observations for training agents](https://blogs.unity3d.com/2020/11/20/how-eidos-montreal-created-grid-sensors-to-improve-observations-for-training-agents/)
-- (November 11, 2020)
- [2020 AI@Unity interns shoutout](https://blogs.unity3d.com/2020/11/11/2020-aiunity-interns-shoutout/)
-- (May 12, 2020)
- [Announcing ML-Agents Unity Package v1.0!](https://blogs.unity3d.com/2020/05/12/announcing-ml-agents-unity-package-v1-0/)
-- (February 28, 2020)
- [Training intelligent adversaries using self-play with ML-Agents](https://blogs.unity3d.com/2020/02/28/training-intelligent-adversaries-using-self-play-with-ml-agents/)
-- (November 11, 2019)
- [Training your agents 7 times faster with ML-Agents](https://blogs.unity3d.com/2019/11/11/training-your-agents-7-times-faster-with-ml-agents/)
-- (October 21, 2019)
- [The AI@Unity interns help shape the world](https://blogs.unity3d.com/2019/10/21/the-aiunity-interns-help-shape-the-world/)
-- (April 15, 2019)
- [Unity ML-Agents Toolkit v0.8: Faster training on real games](https://blogs.unity3d.com/2019/04/15/unity-ml-agents-toolkit-v0-8-faster-training-on-real-games/)
-- (March 1, 2019)
- [Unity ML-Agents Toolkit v0.7: A leap towards cross-platform inference](https://blogs.unity3d.com/2019/03/01/unity-ml-agents-toolkit-v0-7-a-leap-towards-cross-platform-inference/)
-- (December 17, 2018)
- [ML-Agents Toolkit v0.6: Improved usability of Brains and Imitation Learning](https://blogs.unity3d.com/2018/12/17/ml-agents-toolkit-v0-6-improved-usability-of-brains-and-imitation-learning/)
-- (October 2, 2018)
- [Puppo, The Corgi: Cuteness Overload with the Unity ML-Agents Toolkit](https://blogs.unity3d.com/2018/10/02/puppo-the-corgi-cuteness-overload-with-the-unity-ml-agents-toolkit/)
-- (September 11, 2018)
- [ML-Agents Toolkit v0.5, new resources for AI researchers available now](https://blogs.unity3d.com/2018/09/11/ml-agents-toolkit-v0-5-new-resources-for-ai-researchers-available-now/)
-- (June 26, 2018)
- [Solving sparse-reward tasks with Curiosity](https://blogs.unity3d.com/2018/06/26/solving-sparse-reward-tasks-with-curiosity/)
-- (June 19, 2018)
- [Unity ML-Agents Toolkit v0.4 and Udacity Deep Reinforcement Learning Nanodegree](https://blogs.unity3d.com/2018/06/19/unity-ml-agents-toolkit-v0-4-and-udacity-deep-reinforcement-learning-nanodegree/)
-- (May 24, 2018)
- [Imitation Learning in Unity: The Workflow](https://blogs.unity3d.com/2018/05/24/imitation-learning-in-unity-the-workflow/)
-- (March 15, 2018)
- [ML-Agents Toolkit v0.3 Beta released: Imitation Learning, feedback-driven features, and more](https://blogs.unity3d.com/2018/03/15/ml-agents-v0-3-beta-released-imitation-learning-feedback-driven-features-and-more/)
-- (December 11, 2017)
- [Using Machine Learning Agents in a real game: a beginner’s guide](https://blogs.unity3d.com/2017/12/11/using-machine-learning-agents-in-a-real-game-a-beginners-guide/)
-- (December 8, 2017)
- [Introducing ML-Agents Toolkit v0.2: Curriculum Learning, new environments, and more](https://blogs.unity3d.com/2017/12/08/introducing-ml-agents-v0-2-curriculum-learning-new-environments-and-more/)
-- (September 19, 2017)
- [Introducing: Unity Machine Learning Agents Toolkit](https://blogs.unity3d.com/2017/09/19/introducing-unity-machine-learning-agents/)
-- Overviewing reinforcement learning concepts
- ([multi-armed bandit](https://blogs.unity3d.com/2017/06/26/unity-ai-themed-blog-entries/)
- and
- [Q-learning](https://blogs.unity3d.com/2017/08/22/unity-ai-reinforcement-learning-with-q-learning/))
-
-### More from Unity
-
-- [Unity Robotics](https://github.com/Unity-Technologies/Unity-Robotics-Hub)
-- [Unity Computer Vision](https://unity.com/computer-vision)
-- [Unity Game Simulation](https://unity.com/products/game-simulation)
-
-## Community and Feedback
-
-The ML-Agents Toolkit is an open-source project and we encourage and welcome
-contributions. If you wish to contribute, be sure to review our
-[contribution guidelines](com.unity.ml-agents/CONTRIBUTING.md) and
-[code of conduct](CODE_OF_CONDUCT.md).
-
-For problems with the installation and setup of the ML-Agents Toolkit, or
-discussions about how to best setup or train your agents, please create a new
-thread on the
-[Unity ML-Agents forum](https://forum.unity.com/forums/ml-agents.453/) and make
-sure to include as much detail as possible. If you run into any other problems
-using the ML-Agents Toolkit or have a specific feature request, please
-[submit a GitHub issue](https://github.com/Unity-Technologies/ml-agents/issues).
-
-Please tell us which samples you would like to see shipped with the ML-Agents Unity
-package by replying to
-[this forum thread](https://forum.unity.com/threads/feedback-wanted-shipping-sample-s-with-the-ml-agents-package.1073468/).
-
-
-Your opinion matters a great deal to us. Only by hearing your thoughts on the
-Unity ML-Agents Toolkit can we continue to improve and grow. Please take a few
-minutes to
-[let us know about it](https://unitysoftware.co1.qualtrics.com/jfe/form/SV_55pQKCZ578t0kbc).
-
-For any other questions or feedback, connect directly with the ML-Agents team at
-ml-agents@unity3d.com.
-
-## Privacy
-
-In order to improve the developer experience for Unity ML-Agents Toolkit, we have added in-editor analytics.
-Please refer to "Information that is passively collected by Unity" in the
-[Unity Privacy Policy](https://unity3d.com/legal/privacy-policy).
-
-## License
-
-[Apache License 2.0](LICENSE.md)
diff --git a/SURVEY.md b/SURVEY.md
index 8523a19d81..1eb6bb1b7b 100644
--- a/SURVEY.md
+++ b/SURVEY.md
@@ -2,6 +2,6 @@
Your opinion matters a great deal to us. Only by hearing your thoughts on the
Unity ML-Agents Toolkit can we continue to improve and grow. Please take a few
-minutes to let us know about it.
+minutes to let us know about it by emailing us at [ml-agents@unity3d.com](mailto:ml-agents@unity3d.com).
-[Fill out the survey](https://goo.gl/forms/qFMYSYr5TlINvG6f1)
+
diff --git a/colab/Colab_UnityEnvironment_1_Run.ipynb b/colab/Colab_UnityEnvironment_1_Run.ipynb
index f27fc3ff5d..d431c3d14f 100644
--- a/colab/Colab_UnityEnvironment_1_Run.ipynb
+++ b/colab/Colab_UnityEnvironment_1_Run.ipynb
@@ -145,7 +145,7 @@
" import mlagents\n",
" print(\"ml-agents already installed\")\n",
"except ImportError:\n",
- " !python -m pip install -q mlagents==0.28.0\n",
+ " !python -m pip install -q mlagents==0.29.0\n",
" print(\"Installed ml-agents\")"
],
"execution_count": null,
@@ -500,4 +500,4 @@
"outputs": []
}
]
-}
\ No newline at end of file
+}
diff --git a/colab/Colab_UnityEnvironment_2_Train.ipynb b/colab/Colab_UnityEnvironment_2_Train.ipynb
index 2c6cb798a1..dda37a4e18 100644
--- a/colab/Colab_UnityEnvironment_2_Train.ipynb
+++ b/colab/Colab_UnityEnvironment_2_Train.ipynb
@@ -135,7 +135,7 @@
" import mlagents\n",
" print(\"ml-agents already installed\")\n",
"except ImportError:\n",
- " !python -m pip install -q mlagents==0.28.0\n",
+ " !python -m pip install -q mlagents==0.29.0\n",
" print(\"Installed ml-agents\")"
],
"execution_count": null,
@@ -686,4 +686,4 @@
"outputs": []
}
]
-}
\ No newline at end of file
+}
diff --git a/colab/Colab_UnityEnvironment_3_SideChannel.ipynb b/colab/Colab_UnityEnvironment_3_SideChannel.ipynb
index 735a23380a..c053fff759 100644
--- a/colab/Colab_UnityEnvironment_3_SideChannel.ipynb
+++ b/colab/Colab_UnityEnvironment_3_SideChannel.ipynb
@@ -136,7 +136,7 @@
" import mlagents\n",
" print(\"ml-agents already installed\")\n",
"except ImportError:\n",
- " !python -m pip install -q mlagents==0.28.0\n",
+ " !python -m pip install -q mlagents==0.29.0\n",
" print(\"Installed ml-agents\")"
],
"execution_count": null,
@@ -290,4 +290,4 @@
]
}
]
-}
\ No newline at end of file
+}
diff --git a/colab/Colab_UnityEnvironment_4_SB3VectorEnv.ipynb b/colab/Colab_UnityEnvironment_4_SB3VectorEnv.ipynb
index 24685404be..8c2e671c05 100644
--- a/colab/Colab_UnityEnvironment_4_SB3VectorEnv.ipynb
+++ b/colab/Colab_UnityEnvironment_4_SB3VectorEnv.ipynb
@@ -1,20 +1,4 @@
{
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
- "colab": {
- "name": "Colab-UnityEnvironment-1-Run.ipynb",
- "private_outputs": true,
- "provenance": [],
- "collapsed_sections": [],
- "toc_visible": true
- },
- "kernelspec": {
- "name": "python3",
- "language": "python",
- "display_name": "Python 3"
- }
- },
"cells": [
{
"cell_type": "markdown",
@@ -38,6 +22,11 @@
{
"cell_type": "code",
"execution_count": null,
+ "metadata": {
+ "pycharm": {
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"#@title Install Rendering Dependencies { display-mode: \"form\" }\n",
@@ -112,13 +101,7 @@
" !bash frame-buffer start\n",
" os.environ[\"DISPLAY\"] = \":1\"\n",
"pro_bar.update(progress(100, 100))"
- ],
- "metadata": {
- "collapsed": false,
- "pycharm": {
- "name": "#%%\n"
- }
- }
+ ]
},
{
"cell_type": "markdown",
@@ -131,22 +114,22 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "N8yfQqkbebQ5",
"pycharm": {
"is_executing": true
}
},
+ "outputs": [],
"source": [
"try:\n",
" import mlagents\n",
" print(\"ml-agents already installed\")\n",
"except ImportError:\n",
- " !python -m pip install -q mlagents==0.28.0\n",
+ " !python -m pip install -q mlagents==0.29.0\n",
" print(\"Installed ml-agents\")"
- ],
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
@@ -168,38 +151,161 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "YSf-WhxbqtLw"
},
+ "outputs": [],
"source": [
- "from math import ceil\n",
+ "from dataclasses import dataclass\n",
+ "from pathlib import Path\n",
+ "from typing import Callable, Any\n",
+ "\n",
+ "import gym\n",
+ "from gym import Env\n",
"\n",
"from stable_baselines3 import PPO\n",
- "from stable_baselines3.common.vec_env import VecMonitor\n",
+ "from stable_baselines3.common.vec_env import VecMonitor, VecEnv, SubprocVecEnv\n",
+ "from supersuit import observation_lambda_v0\n",
"\n",
- "from mlagents_envs.envs.unity_vec_env import make_mla_sb3_env, LimitedConfig\n",
"\n",
- "# 250K should train to a reward ~= 0.90 for the \"Basic\" environment.\n",
- "# We set the value lower here to demonstrate just a small amount of trianing.\n",
- "TOTAL_TAINING_STEPS_GOAL = 40 * 1000\n",
- "NUM_ENVS = 12\n",
- "STEPS_PER_UPDATE = 2048"
- ],
- "execution_count": 29,
- "outputs": []
+ "from mlagents_envs.environment import UnityEnvironment\n",
+ "from mlagents_envs.envs.unity_gym_env import UnityToGymWrapper\n",
+ "from mlagents_envs.registry import UnityEnvRegistry, default_registry\n",
+ "from mlagents_envs.side_channel.engine_configuration_channel import (\n",
+ " EngineConfig,\n",
+ " EngineConfigurationChannel,\n",
+ ")\n",
+ "\n",
+ "NUM_ENVS = 8"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Environment and Engine Configurations"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Default values from CLI (See cli_utils.py)\n",
+ "DEFAULT_ENGINE_CONFIG = EngineConfig(\n",
+ " width=84,\n",
+ " height=84,\n",
+ " quality_level=4,\n",
+ " time_scale=20,\n",
+ " target_frame_rate=-1,\n",
+ " capture_frame_rate=60,\n",
+ ")\n",
+ "\n",
+ "# Some config subset of an actual config.yaml file for MLA.\n",
+ "@dataclass\n",
+ "class LimitedConfig:\n",
+ " # The local path to a Unity executable or the name of an entry in the registry.\n",
+ " env_path_or_name: str\n",
+ " base_port: int\n",
+ " base_seed: int = 0\n",
+ " num_env: int = 1\n",
+ " engine_config: EngineConfig = DEFAULT_ENGINE_CONFIG\n",
+ " visual_obs: bool = False\n",
+ " # TODO: Decide if we should just tell users to always use MultiInputPolicy so we can simplify the user workflow.\n",
+ " # WARNING: Make sure to use MultiInputPolicy if you turn this on.\n",
+ " allow_multiple_obs: bool = False\n",
+ " env_registry: UnityEnvRegistry = default_registry"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Unity Environment SB3 Factory"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def _unity_env_from_path_or_registry(\n",
+ " env: str, registry: UnityEnvRegistry, **kwargs: Any\n",
+ ") -> UnityEnvironment:\n",
+ " if Path(env).expanduser().absolute().exists():\n",
+ " return UnityEnvironment(file_name=env, **kwargs)\n",
+ " elif env in registry:\n",
+ " return registry.get(env).make(**kwargs)\n",
+ " else:\n",
+ " raise ValueError(f\"Environment '{env}' wasn't a local path or registry entry\")\n",
+ " \n",
+ "def make_mla_sb3_env(config: LimitedConfig, **kwargs: Any) -> VecEnv:\n",
+ " def handle_obs(obs, space):\n",
+ " if isinstance(space, gym.spaces.Tuple):\n",
+ " if len(space) == 1:\n",
+ " return obs[0]\n",
+ " # Turn the tuple into a dict (stable baselines can handle spaces.Dict but not spaces.Tuple).\n",
+ " return {str(i): v for i, v in enumerate(obs)}\n",
+ " return obs\n",
+ "\n",
+ " def handle_obs_space(space):\n",
+ " if isinstance(space, gym.spaces.Tuple):\n",
+ " if len(space) == 1:\n",
+ " return space[0]\n",
+ " # Turn the tuple into a dict (stable baselines can handle spaces.Dict but not spaces.Tuple).\n",
+ " return gym.spaces.Dict({str(i): v for i, v in enumerate(space)})\n",
+ " return space\n",
+ "\n",
+ " def create_env(env: str, worker_id: int) -> Callable[[], Env]:\n",
+ " def _f() -> Env:\n",
+ " engine_configuration_channel = EngineConfigurationChannel()\n",
+ " engine_configuration_channel.set_configuration(config.engine_config)\n",
+ " kwargs[\"side_channels\"] = kwargs.get(\"side_channels\", []) + [\n",
+ " engine_configuration_channel\n",
+ " ]\n",
+ " unity_env = _unity_env_from_path_or_registry(\n",
+ " env=env,\n",
+ " registry=config.env_registry,\n",
+ " worker_id=worker_id,\n",
+ " base_port=config.base_port,\n",
+ " seed=config.base_seed + worker_id,\n",
+ " **kwargs,\n",
+ " )\n",
+ " new_env = UnityToGymWrapper(\n",
+ " unity_env=unity_env,\n",
+ " uint8_visual=config.visual_obs,\n",
+ " allow_multiple_obs=config.allow_multiple_obs,\n",
+ " )\n",
+ " new_env = observation_lambda_v0(new_env, handle_obs, handle_obs_space)\n",
+ " return new_env\n",
+ "\n",
+ " return _f\n",
+ "\n",
+ " env_facts = [\n",
+ " create_env(config.env_path_or_name, worker_id=x) for x in range(config.num_env)\n",
+ " ]\n",
+ " return SubprocVecEnv(env_facts)"
+ ]
},
{
"cell_type": "markdown",
+ "metadata": {},
"source": [
"### Start Environment from the registry"
- ],
- "metadata": {
- "collapsed": false
- }
+ ]
},
{
"cell_type": "code",
"execution_count": null,
+ "metadata": {
+ "pycharm": {
+ "is_executing": true,
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
"# -----------------\n",
@@ -220,78 +326,84 @@
" ),\n",
" no_graphics=True, # Set to false if you are running locally and want to watch the environments move around as they train.\n",
")"
- ],
- "metadata": {
- "collapsed": false,
- "pycharm": {
- "name": "#%%\n",
- "is_executing": true
- }
- }
+ ]
},
{
"cell_type": "markdown",
+ "metadata": {},
"source": [
"### Create the model"
- ],
- "metadata": {
- "collapsed": false
- }
+ ]
},
{
"cell_type": "code",
"execution_count": null,
+ "metadata": {
+ "pycharm": {
+ "is_executing": true,
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
+ "# 250K should train to a reward ~= 0.90 for the \"Basic\" environment.\n",
+ "# We set the value lower here to demonstrate just a small amount of trianing.\n",
+ "BATCH_SIZE = 32\n",
+ "BUFFER_SIZE = 256\n",
+ "UPDATES = 50\n",
+ "TOTAL_TAINING_STEPS_GOAL = BUFFER_SIZE * UPDATES\n",
+ "BETA = 0.0005\n",
+ "N_EPOCHS = 3 \n",
+ "STEPS_PER_UPDATE = BUFFER_SIZE / NUM_ENVS\n",
+ "\n",
"# Helps gather stats for our eval() calls later so we can see reward stats.\n",
"env = VecMonitor(env)\n",
- "# Attempt to approximate settings from 3DBall.yaml\n",
+ "\n",
+ "#Policy and Value function with 2 layers of 128 units each and no shared layers.\n",
+ "policy_kwargs = {\"net_arch\" : [{\"pi\": [32,32], \"vf\": [32,32]}]}\n",
+ "\n",
"model = PPO(\n",
" \"MlpPolicy\",\n",
" env,\n",
" verbose=1,\n",
- " learning_rate=lambda prog: 0.0003 * (1.0 - prog),\n",
+ " learning_rate=lambda progress: 0.0003 * (1.0 - progress),\n",
+ " clip_range=lambda progress: 0.2 * (1.0 - progress),\n",
+ " clip_range_vf=lambda progress: 0.2 * (1.0 - progress),\n",
" # Uncomment this if you want to log tensorboard results when running this notebook locally.\n",
" # tensorboard_log=\"results\",\n",
+ " policy_kwargs=policy_kwargs,\n",
" n_steps=int(STEPS_PER_UPDATE),\n",
+ " batch_size=BATCH_SIZE,\n",
+ " n_epochs=N_EPOCHS,\n",
+ " ent_coef=BETA,\n",
")"
- ],
- "metadata": {
- "collapsed": false,
- "pycharm": {
- "name": "#%%\n",
- "is_executing": true
- }
- }
+ ]
},
{
"cell_type": "markdown",
+ "metadata": {},
"source": [
"### Train the model"
- ],
- "metadata": {
- "collapsed": false
- }
+ ]
},
{
"cell_type": "code",
"execution_count": null,
+ "metadata": {
+ "pycharm": {
+ "is_executing": true,
+ "name": "#%%\n"
+ }
+ },
"outputs": [],
"source": [
- "training_rounds = ceil(TOTAL_TAINING_STEPS_GOAL / int(STEPS_PER_UPDATE * NUM_ENVS))\n",
- "for i in range(training_rounds):\n",
- " print(f\"Training round {i + 1}/{training_rounds}\")\n",
+ "# 0.93 is considered solved for the Basic environment\n",
+ "for i in range(UPDATES):\n",
+ " print(f\"Training round {i + 1}/{UPDATES}\")\n",
" # NOTE: rest_num_timesteps should only happen the first time so that tensorboard logs are consistent.\n",
- " model.learn(total_timesteps=6000, reset_num_timesteps=(i == 0))\n",
+ " model.learn(total_timesteps=BUFFER_SIZE, reset_num_timesteps=(i == 0))\n",
" model.policy.eval()"
- ],
- "metadata": {
- "collapsed": false,
- "pycharm": {
- "name": "#%%\n",
- "is_executing": true
- }
- }
+ ]
},
{
"cell_type": "markdown",
@@ -305,6 +417,7 @@
},
{
"cell_type": "code",
+ "execution_count": null,
"metadata": {
"id": "vdWG6_SqtNtv",
"pycharm": {
@@ -312,12 +425,46 @@
"name": "#%%\n"
}
},
+ "outputs": [],
"source": [
"env.close()\n",
"print(\"Closed environment\")\n"
- ],
+ ]
+ },
+ {
+ "cell_type": "code",
"execution_count": null,
- "outputs": []
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [],
+ "name": "Colab-UnityEnvironment-1-Run.ipynb",
+ "private_outputs": true,
+ "provenance": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.8"
}
- ]
-}
\ No newline at end of file
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
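The refactored notebook above inlines `LimitedConfig` and `make_mla_sb3_env` (previously imported from `mlagents_envs.envs.unity_vec_env`) and retunes the PPO hyperparameters. For orientation, here is a minimal sketch of the resulting end-to-end flow, assuming those two definitions from the cells above are in scope and that `mlagents==0.29.0`, `stable-baselines3`, and `supersuit` are installed; the port number and the linear learning-rate schedule are illustrative, not part of the diff:

```python
# Minimal sketch of the notebook's end-to-end flow; assumes LimitedConfig and
# make_mla_sb3_env are defined exactly as in the notebook cells above.
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import VecMonitor

NUM_ENVS = 8
BUFFER_SIZE = 256
UPDATES = 50

# "Basic" is an entry in the default Unity environment registry.
# Note: SubprocVecEnv spawns worker processes, so outside a notebook this
# should run under an `if __name__ == "__main__":` guard.
env = make_mla_sb3_env(
    LimitedConfig(env_path_or_name="Basic", base_port=6005, num_env=NUM_ENVS),
    no_graphics=True,  # set to False locally to watch the environments train
)
env = VecMonitor(env)  # gathers reward stats for later evaluation

model = PPO(
    "MlpPolicy",
    env,
    verbose=1,
    learning_rate=lambda progress_remaining: 0.0003 * progress_remaining,  # linear decay
    n_steps=BUFFER_SIZE // NUM_ENVS,
)
for i in range(UPDATES):
    # reset_num_timesteps only on the first call keeps TensorBoard logs consistent.
    model.learn(total_timesteps=BUFFER_SIZE, reset_num_timesteps=(i == 0))
env.close()
```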
diff --git a/com.unity.ml-agents b/com.unity.ml-agents
deleted file mode 160000
index 24ce875a0c..0000000000
--- a/com.unity.ml-agents
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 24ce875a0ce256bffd06d7c80e944459c5b9ba8d
diff --git a/com.unity.ml-agents.extensions/package.json b/com.unity.ml-agents.extensions/package.json
index 636102c730..436ba05296 100644
--- a/com.unity.ml-agents.extensions/package.json
+++ b/com.unity.ml-agents.extensions/package.json
@@ -2,10 +2,10 @@
"name": "com.unity.ml-agents.extensions",
"displayName": "ML Agents Extensions",
"version": "0.6.1-preview",
- "unity": "2020.3",
+ "unity": "2021.3",
"description": "A source-only package for new features based on ML-Agents",
"dependencies": {
- "com.unity.ml-agents": "2.2.1-exp.1",
+ "com.unity.ml-agents": "2.3.0-exp.3",
"com.unity.modules.physics": "1.0.0"
}
}
diff --git a/com.unity.ml-agents/.gitignore b/com.unity.ml-agents/.gitignore
new file mode 100755
index 0000000000..b40e78e61a
--- /dev/null
+++ b/com.unity.ml-agents/.gitignore
@@ -0,0 +1,30 @@
+artifacts/**
+build/**
+.build_script/**
+node_modules/**
+.DS_Store
+.npmrc
+!Documentation~
+!.Documentation
+npm-debug.log
+build.sh.meta
+build.bat.meta
+.idea/
+!Samples/*/*.unitypackage
+
+/[Ll]ibrary/
+/Logs/
+/[Tt]emp/
+/[Oo]bj/
+/[Bb]uild/
+/[Bb]uilds/
+/Assets/AssetStoreTools*
+/Assets/Plugins*
+/Assets/Demonstrations*
+/csharp_timers.json
+
+# Visual Studio 2015 cache directory
+/.vs/
+
+*.api
+*.api.meta
diff --git a/com.unity.ml-agents/.npmignore b/com.unity.ml-agents/.npmignore
new file mode 100755
index 0000000000..0f2d8d322c
--- /dev/null
+++ b/com.unity.ml-agents/.npmignore
@@ -0,0 +1,20 @@
+artifacts/**
+build/**
+.build_script/**
+node_modules/**
+Documentation/ApiDocs/**
+Documentation~/ApiDocs/**
+.DS_Store
+.npmrc
+.npmignore
+.gitignore
+CONTRIBUTING.md
+CONTRIBUTING.md.meta
+QAReport.md
+QAReport.md.meta
+.gitlab-ci.yml
+build.sh
+build.sh.meta
+build.bat
+build.bat.meta
+upm-ci.log
diff --git a/com.unity.ml-agents/CHANGELOG.md b/com.unity.ml-agents/CHANGELOG.md
new file mode 100755
index 0000000000..45deb61b80
--- /dev/null
+++ b/com.unity.ml-agents/CHANGELOG.md
@@ -0,0 +1,902 @@
+# Changelog
+
+All notable changes to this package will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to
+[Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [2.3.0-exp.3] - 2022-11-21
+### Major Changes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- The minimum supported Unity version was updated to 2021.3. (#)
+
+#### ml-agents / ml-agents-envs
+- Added the ability to add your own trainers to the package using the ML-Agents Custom Trainers plugin. (#)
+ - The ML-Agents Custom Trainers plugin is an extensible plugin system for defining new trainers based on the
+ high-level trainer API; read more [here](../docs/Python-Custom-Trainer-Plugin.md).
+- Refactored core modules to make ML-Agents internal classes more generalizable to various RL algorithms. (#)
+- The minimum supported Python version for ML-Agents has changed to 3.8.13. (#)
+- The minimum supported version of PyTorch was changed to 1.8.0. (#)
+- Add shared critic configurability for PPO. (#)
+- We moved the `UnityToGymWrapper` and PettingZoo APIs to the `ml-agents-envs` package; see the import sketch below. All these environments will be
+versioned under the `ml-agents-envs` package in the future. (#)
+
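Since the changelog entry above only names the package move, a short import sketch may help readers upgrading from `gym-unity`; the wrapper's keyword arguments below mirror the updated Colab notebook earlier in this diff, and connecting with `file_name=None` (a running Editor instance) is an illustrative assumption:

```python
# Old location (gym-unity package, now removed):
#   from gym_unity.envs import UnityToGymWrapper
# New location, versioned under the ml-agents-envs package:
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.envs.unity_gym_env import UnityToGymWrapper

unity_env = UnityEnvironment(file_name=None)  # None attaches to a running Editor
gym_env = UnityToGymWrapper(unity_env, uint8_visual=False, allow_multiple_obs=False)
obs = gym_env.reset()
gym_env.close()
```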
+### Minor Changes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- Added switch to RayPerceptionSensor to allow rays to be ordered left to right. (#26)
+ - Current alternating order is still the default but will be deprecated.
+- Added support for enabling/disabling the camera object attached to a camera sensor in order to improve performance. (#31)
+
+#### ml-agents / ml-agents-envs
+- Renamed the path that shadows torch to "mlagents/trainers/torch_entities" and updated the respective imports. (#)
+
+
+### Bug Fixes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+#### ml-agents / ml-agents-envs
+
+
+## [2.3.0-exp.2] - 2022-03-28
+### Major Changes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+#### ml-agents / ml-agents-envs
+- Refactored to support the new ML-Agents Pro package.
+- The minimum supported Python version for ML-Agents-envs was changed to 3.7.2. (#)
+- Added support for the PettingZoo multi-agent API (#)
+- Refactored `gym-unity` into the `ml-agents-envs` package (#)
+
+### Minor Changes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- Upgrade barracuda dependency to 3.0.0 (#)
+#### ml-agents / ml-agents-envs
+- Added the new unity_vec_env file to the ml-agents-envs module.
+- Extended support to Python 3.9.10.
+
+### Bug Fixes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+#### ml-agents / ml-agents-envs
+
+## [2.2.1-exp.1] - 2022-01-14
+### Major Changes
+
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- The minimum supported Unity version was updated to 2020.3. (#5673)
+- Added a new feature to replicate training areas dynamically during runtime. (#5568)
+- Update Barracuda to 2.3.1-preview (#5591)
+- Update Input System to 1.3.0 (#5661)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+
+### Minor Changes
+
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- Added the capacity to initialize behaviors from any checkpoint and not just the latest one (#5525)
+- Added the ability to get a read-only view of the stacked observations (#5523)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Set gym version in gym-unity to gym release 0.20.0 (#5540)
+- Added support for having `beta`, `epsilon`, and `learning rate` on separate schedules (affects only PPO and POCA). (#5538)
+- Changed default behavior to restart crashed Unity environments rather than exiting. (#5553)
+ - Rate and lifetime limits on this are configurable via three new YAML options
+ 1. env_params.max_lifetime_restarts (--max-lifetime-restarts) [default=10]
+ 2. env_params.restarts_rate_limit_n (--restarts-rate-limit-n) [default=1]
+ 3. env_params.restarts_rate_limit_period_s (--restarts-rate-limit-period-s) [default=60]
+- Deterministic action selection is now supported during training and inference. (#5619)
+ - Added a new `--deterministic` CLI flag to deterministically select the most probable actions in policy. The same thing can
+ be achieved by adding `deterministic: true` under `network_settings` of the run options configuration. (#5597)
+ - Extra tensors are now serialized to support deterministic action selection in onnx. (#5593)
+ - Support inference with deterministic action selection in editor (#5599)
+- Added minimal analytics collection to LL-API (#5511)
+- Update Colab notebooks for GridWorld example with DQN illustrating the use of the Python API and how to export to ONNX (#5643)
+
+### Bug Fixes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- Update gRPC native lib to universal for arm64 and x86_64. This change should enable ml-agents usage on mac M1 (#5283, #5519)
+- Fixed a bug where ml-agents code wouldn't compile on platforms that didn't support analytics (PS4/5, XBoxOne) (#5628)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Fixed a bug where the critics were not being normalized during training. (#5595)
+- Fixed a bug where curriculum learning would crash because of incorrect run_options parsing. (#5586)
+- Fixed a bug in multi-agent cooperative training where agents might not receive all of the states of
+terminated teammates. (#5441)
+- Fixed wrong attribute name in argparser for torch device option (#5433)(#5467)
+- Fixed conflicting CLI and yaml options regarding resume & initialize_from (#5495)
+- Fixed failing tests for gym-unity due to gym 0.20.0 release (#5540)
+- Fixed a bug in VAIL where the variational bottleneck was not properly passing gradients (#5546)
+- Harden user PII protection logic and extend TrainingAnalytics to expose detailed configuration parameters. (#5512)
+
+## [2.1.0-exp.1] - 2021-06-09
+### Minor Changes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- Updated Barracuda to 2.0.0-pre.3. (#5385)
+- Fixed NullReferenceException when adding Behavior Parameters with no Agent. (#5382)
+- Add stacking option in Editor for `VectorSensorComponent`. (#5376)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Lock cattrs dependency version to 1.6. (#5397)
+- Added a fully connected visual encoder for environments with very small image inputs. (#5351)
+- Colab notebooks illustrating the use of the Python API are now part of the repository. (#5399)
+
+### Bug Fixes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- RigidBodySensorComponent now displays a warning if it's used in a way that won't generate useful observations. (#5387)
+- Update the documentation with a note saying that `GridSensor` does not work in 2D environments. (#5396)
+- Fixed an error where sensors would not reset properly before collecting the last observation at the end of an
+episode. (#5375)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- The calculation of the target entropy of SAC with continuous actions was incorrect and has been fixed. (#5372)
+- Fixed an issue where the histogram stats would not be reported correctly in TensorBoard. (#5410)
+- Fixed error when importing models which use the ResNet encoder. (#5358)
+
+
+## [2.0.0-exp.1] - 2021-04-22
+### Major Changes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- The minimum supported Unity version was updated to 2019.4. (#5166)
+- Several breaking interface changes were made. See the
+[Migration Guide](https://github.com/Unity-Technologies/ml-agents/blob/release_17_docs/docs/Migrating.md) for more
+details.
+- Some methods previously marked as `Obsolete` have been removed. If you were using these methods, you need to replace them with their supported counterpart.
+- The interface for disabling discrete actions in `IDiscreteActionMask` has changed.
+`WriteMask(int branch, IEnumerable actionIndices)` was replaced with
+`SetActionEnabled(int branch, int actionIndex, bool isEnabled)`. (#5060)
+- IActuator now implements IHeuristicProvider. (#5110)
+- `ISensor.GetObservationShape()` was removed, and `GetObservationSpec()` was added. The `ITypedSensor`
+and `IDimensionPropertiesSensor` interfaces were removed. (#5127)
+- `ISensor.GetCompressionType()` was removed, and `GetCompressionSpec()` was added. The `ISparseChannelSensor`
+interface was removed. (#5164)
+- The abstract method `SensorComponent.GetObservationShape()` was no longer being called, so it has been removed. (#5172)
+- `SensorComponent.CreateSensor()` was replaced with `SensorComponent.CreateSensors()`, which returns an `ISensor[]`. (#5181)
+- `Match3Sensor` was refactored to produce cell and special type observations separately, and `Match3SensorComponent` now
+produces two `Match3Sensor`s (unless there are no special types). Previously trained models will have different observation
+sizes and will need to be retrained. (#5181)
+- The `AbstractBoard` class for integration with Match-3 games was changed to make it easier to support boards with
+different sizes using the same model. For a summary of the interface changes, please see the Migration Guide. (#5189)
+- Updated the Barracuda package to version `1.4.0-preview`. (#5236)
+- `GridSensor` has been refactored and moved to the main package, with changes to both sensor interfaces and behaviors.
+Existing GridSensors created by the extension package will not work in newer versions. Previously trained models will
+need to be retrained. Please see the Migration Guide for more details. (#5256)
+- Models trained with 1.x versions of ML-Agents will no longer work at inference if they were trained using recurrent neural networks (#5254)
+
+### Minor Changes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- The `.onnx` models input names have changed. All input placeholders will now use the prefix `obs_` removing the distinction between visual and vector observations. In addition, the inputs and outputs of LSTM changed. Models created with this version will not be usable with previous versions of the package (#5080, #5236)
+- The `.onnx` models discrete action output now contains the discrete actions values and not the logits. Models created with this version will not be usable with previous versions of the package (#5080)
+- Added ML-Agents package settings. (#5027)
+- Make com.unity.modules.unityanalytics an optional dependency. (#5109)
+- Make com.unity.modules.physics and com.unity.modules.physics2d optional dependencies. (#5112)
+- The default `InferenceDevice` is now `InferenceDevice.Default`, which is equivalent to `InferenceDevice.Burst`. If you
+depend on the previous behavior, you can explicitly set the Agent's `InferenceDevice` to `InferenceDevice.CPU`. (#5175)
+- Added support for `Goal Signal` as a type of observation. Trainers can now use HyperNetworks to process `Goal Signal`. Trainers with HyperNetworks are more effective at solving multiple tasks. (#5142, #5159, #5149)
+- Modified the [GridWorld environment](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Learning-Environment-Examples.md#gridworld) to use the new `Goal Signal` feature. (#5193)
+- `DecisionRequester.ShouldRequestDecision()` and `ShouldRequestAction()` methods were added. These are used to
+determine whether `Agent.RequestDecision()` and `Agent.RequestAction()` are called (respectively). (#5223)
+- `RaycastPerceptionSensor` now caches its raycast results; they can be accessed via `RayPerceptionSensor.RayPerceptionOutput`. (#5222)
+- `ActionBuffers` are now reset to zero before being passed to `Agent.Heuristic()` and
+`IHeuristicProvider.Heuristic()`. (#5227)
+- `Agent` will now call `IDisposable.Dispose()` on all `ISensor`s that implement the `IDisposable` interface. (#5233)
+- `CameraSensor`, `RenderTextureSensor`, and `Match3Sensor` will now reuse their `Texture2D`s, reducing the
+amount of memory that needs to be allocated during runtime. (#5233)
+- Optimized `ObservationWriter.WriteTexture()` so that it doesn't call `Texture2D.GetPixels32()` for `RGB24` textures.
+This results in much less memory being allocated during inference with `CameraSensor` and `RenderTextureSensor`. (#5233)
+- The Match-3 integration utilities were moved from `com.unity.ml-agents.extensions` to `com.unity.ml-agents`. (#5259)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Some console output has been moved from `info` to `debug` and will not be printed by default. If you want all messages to be printed, you can run `mlagents-learn` with the `--debug` option or add the line `debug: true` at the top of the yaml config file. (#5211)
+- When using a configuration YAML, it is required to define all behaviors found in a Unity
+executable in the trainer configuration YAML, or specify `default_settings`. (#5210)
+- The embedding size of attention layers used when a BufferSensor is in the scene has been changed. It is now fixed to 128 units. It might be impossible to resume training from a checkpoint of a previous version. (#5272)
+
+### Bug Fixes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- Fixed a bug where sensors and actuators could get sorted inconsistently on different systems due to different Culture
+settings. Unfortunately, this may require retraining models if it changes the resulting order of the sensors
+or actuators on your system. (#5194)
+- Removed additional memory allocations that were occurring due to assert messages and iterating over DemonstrationRecorders. (#5246)
+- Fixed a bug where an agent would try to access uninitialized fields when a new RayPerceptionSensorComponent was created on it. (#5261)
+- Fixed a bug where the DemonstrationRecorder would throw a null reference exception if Num Steps To Record was > 0 and Record was turned off. (#5274)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Fixed a bug where `--results-dir` had no effect. (#5269)
+- Fixed a bug where old `.pt` checkpoints were not deleted during training. (#5271)
+- The `UnityToGymWrapper` initializer now accepts an optional `action_space_seed`. If this is specified, it will
+be used to set the random seed on the resulting action space. (#5303)
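+
+  A minimal sketch of the new parameter (the build path is hypothetical):
+
+  ```python
+  from mlagents_envs.environment import UnityEnvironment
+  from gym_unity.envs import UnityToGymWrapper
+
+  unity_env = UnityEnvironment("builds/MyEnv")  # hypothetical build path
+  env = UnityToGymWrapper(unity_env, action_space_seed=42)
+  action = env.action_space.sample()  # reproducible given the same seed
+  ```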
+
+
+## [1.9.1-preview] - 2021-04-13
+### Major Changes
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- The `--resume` flag now supports resuming experiments with additional reward providers or
+ loading partial models if the network architecture has changed. See
+ [here](https://github.com/Unity-Technologies/ml-agents/blob/release_16_docs/docs/Training-ML-Agents.md#loading-an-existing-model)
+ for more details. (#5213)
+
+### Bug Fixes
+#### com.unity.ml-agents (C#)
+- Fixed erroneous warnings when using the Demonstration Recorder. (#5216)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Fixed an issue which was causing increased variance when using LSTMs. Also fixed an issue with LSTM when used with POCA and `sequence_length` < `time_horizon`. (#5206)
+- Fixed a bug where the SAC replay buffer would not be saved out at the end of a run, even if `save_replay_buffer` was enabled. (#5205)
+- ELO now correctly resumes when loading from a checkpoint. (#5202)
+- In the Python API, fixed `validate_action` to expect the right dimensions when `set_action_single_agent` is called. (#5208)
+- In the `GymToUnityWrapper`, raise an appropriate warning if `step()` is called after an environment is done. (#5204)
+- Fixed an issue where using one of the `gym` wrappers would override user-set log levels. (#5201)
+
+## [1.9.0-preview] - 2021-03-17
+### Major Changes
+#### com.unity.ml-agents (C#)
+- The `BufferSensor` and `BufferSensorComponent` have been added. They allow the Agent to observe variable number of entities. For an example, see the [Sorter environment](https://github.com/Unity-Technologies/ml-agents/blob/release_15_docs/docs/Learning-Environment-Examples.md#sorter). (#4909)
+- The `SimpleMultiAgentGroup` class and `IMultiAgentGroup` interface have been added. These allow Agents to be given rewards and
+ end episodes in groups. For examples, see the [Cooperative Push Block](https://github.com/Unity-Technologies/ml-agents/blob/release_15_docs/docs/Learning-Environment-Examples.md#cooperative-push-block), [Dungeon Escape](https://github.com/Unity-Technologies/ml-agents/blob/release_15_docs/docs/Learning-Environment-Examples.md#dungeon-escape) and [Soccer](https://github.com/Unity-Technologies/ml-agents/blob/release_15_docs/docs/Learning-Environment-Examples.md#soccer-twos) environments. (#4923)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- The MA-POCA trainer has been added. This is a new trainer that enables Agents to learn how to work together in groups. Configure
+ `poca` as the trainer in the configuration YAML after instantiating a `SimpleMultiAgentGroup` to use this feature. (#5005)
+
+### Minor Changes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- Updated com.unity.barracuda to 1.3.2-preview. (#5084)
+- Added 3D Ball to the `com.unity.ml-agents` samples. (#5077)
+- Make com.unity.modules.unityanalytics an optional dependency. (#5109)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- The `encoding_size` setting for RewardSignals has been deprecated. Please use `network_settings` instead. (#4982)
+- Sensor names are now passed through to `ObservationSpec.name`. (#5036)
+
+### Bug Fixes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- An issue that caused `GAIL` to fail for environments where agents can terminate episodes by self-sacrifice has been fixed. (#4971)
+- Made the error message when observations of different shapes are sent to the trainer clearer. (#5030)
+- An issue that prevented curriculums from incrementing with self-play has been fixed. (#5098)
+
+## [1.8.1-preview] - 2021-03-08
+### Minor Changes
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- The `cattrs` version dependency was updated to allow `>=1.1.0` on Python 3.8 or higher. (#4821)
+
+### Bug Fixes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- Fix an issue where queuing InputEvents overwrote data from previous events in the same frame. (#5034)
+
+## [1.8.0-preview] - 2021-02-17
+### Major Changes
+#### com.unity.ml-agents (C#)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- TensorFlow trainers have been removed, please use the Torch trainers instead. (#4707)
+- A plugin system for `mlagents-learn` has been added. You can now define custom
+ `StatsWriter` implementations and register them to be called during training.
+ More types of plugins will be added in the future. (#4788)
+
+### Minor Changes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- The `ActionSpec` constructor is now public. Previously, it was not possible to create an
+ ActionSpec with both continuous and discrete actions from code. (#4896)
+- `StatAggregationMethod.Sum` can now be passed to `StatsRecorder.Add()`. This
+ will result in the values being summed (instead of averaged) when written to
+ TensorBoard. Thanks to @brccabral for the contribution! (#4816)
+- The upper limit for the time scale (by setting the `--time-scale` parameter in mlagents-learn) was
+ removed when training with a player. The Editor still requires it to be clamped to 100. (#4867)
+- Added the IHeuristicProvider interface to allow IActuators as well as Agents to implement the Heuristic function to generate actions.
+ Updated the Basic example and the Match3 Example to use Actuators.
+ Changed the namespace and file names of classes in com.unity.ml-agents.extensions. (#4849)
+- Added `VectorSensor.AddObservation(IList)`. `VectorSensor.AddObservation(IEnumerable)`
+ is deprecated. The `IList` version is recommended, as it does not generate any
+ additional memory allocations. (#4887)
+- Added `ObservationWriter.AddList()` and deprecated `ObservationWriter.AddRange()`.
+ `AddList()` is recommended, as it does not generate any additional memory allocations. (#4887)
+- The Barracuda dependency was upgraded to 1.3.0. (#4898)
+- Added `ActuatorComponent.CreateActuators`, and deprecated `ActuatorComponent.CreateActuator`. The
+ default implementation will wrap `ActuatorComponent.CreateActuator` in an array and return that. (#4899)
+- `InferenceDevice.Burst` was added, indicating that Agent's model will be run using Barracuda's Burst backend.
+ This is the default for new Agents, but existing ones that use `InferenceDevice.CPU` should update to
+ `InferenceDevice.Burst`. (#4925)
+- Add an InputActuatorComponent to allow the generation of Agent action spaces from an InputActionAsset.
+ Projects wanting to use this feature will need to add the
+ [Input System Package](https://docs.unity3d.com/Packages/com.unity.inputsystem@1.1/manual/index.html)
+ at version 1.1.0-preview.3 or later. (#4881)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Tensorboard now logs the Environment Reward as both a scalar and a histogram. (#4878)
+- Added a `--torch-device` commandline option to `mlagents-learn`, which sets the default
+ [`torch.device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.torch.device) used for training. (#4888)
+- The `--cpu` commandline option had no effect and was removed. Use `--torch-device=cpu` to force CPU training. (#4888)
+- The `mlagents_env` API has changed: `BehaviorSpec` now has an `observation_specs` property containing a list of `ObservationSpec`. For more information on `ObservationSpec` see [here](https://github.com/Unity-Technologies/ml-agents/blob/release_13_docs/docs/Python-API.md#behaviorspec). (#4763, #4825)
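+
+  As a rough sketch of the new property, assuming a scene is already playing in the Editor:
+
+  ```python
+  from mlagents_envs.environment import UnityEnvironment
+
+  env = UnityEnvironment(file_name=None)  # connect to the Editor
+  env.reset()
+  for name, spec in env.behavior_specs.items():
+      # each ObservationSpec describes the output of one sensor
+      for obs_spec in spec.observation_specs:
+          print(name, obs_spec.shape)
+  env.close()
+  ```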
+
+### Bug Fixes
+#### com.unity.ml-agents (C#)
+- Fix a compile warning about using an obsolete enum in `GrpcExtensions.cs`. (#4812)
+- CameraSensor now logs an error if the GraphicsDevice is null. (#4880)
+- Removed unnecessary memory allocations in `ActuatorManager.UpdateActionArray()` (#4877)
+- Removed unnecessary memory allocations in `SensorShapeValidator.ValidateSensors()` (#4879)
+- Removed unnecessary memory allocations in `SideChannelManager.GetSideChannelMessage()` (#4886)
+- Removed several memory allocations that happened during inference. On a test scene, this
+ reduced the amount of memory allocated by approximately 25%. (#4887)
+- Removed several memory allocations that happened during inference with discrete actions. (#4922)
+- Properly catch permission errors when writing timer files. (#4921)
+- Unexpected exceptions during training initialization and shutdown are now logged. If you see
+ "noisy" logs, please let us know! (#4930, #4935)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Fixed a bug that would cause an exception when `RunOptions` was deserialized via `pickle`. (#4842)
+- Fixed a bug that could cause a crash if a new behavior appeared during multi-environment training. (#4872)
+- Fixed the computation of entropy for continuous actions. (#4869)
+- Fixed a bug that would cause `UnityEnvironment` to wait the full timeout
+ period and report a misleading error message if the executable crashed
+ without closing the connection. It now periodically checks the process status
+ while waiting for a connection, and raises a better error message if it crashes. (#4880)
+- A `-logfile` option passed via the `--env-args` option to `mlagents-learn` is
+  no longer overwritten. (#4880)
+- The `load_weights` function was being called unnecessarily often in the Ghost Trainer leading to training slowdowns. (#4934)
+
+
+## [1.7.2-preview] - 2020-12-22
+### Bug Fixes
+#### com.unity.ml-agents (C#)
+- Add analytics package dependency to the package manifest. (#4794)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Fixed the docker build process. (#4791)
+
+
+## [1.7.0-preview] - 2020-12-21
+### Major Changes
+#### com.unity.ml-agents (C#)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- PyTorch trainers now support training agents with both continuous and discrete action spaces. (#4702)
+  The `.onnx` models generated by the trainers of this release are incompatible with versions of Barracuda before `1.2.1-preview`. If you upgrade the trainers, you must upgrade the version of the Barracuda package as well (which can be done by upgrading the `com.unity.ml-agents` package).
+### Minor Changes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- Agents with both continuous and discrete actions are now supported. You can specify
+both continuous and discrete action sizes in Behavior Parameters. (#4702, #4718)
+- In order to improve the developer experience for the Unity ML-Agents Toolkit, we have added in-editor analytics.
+Please refer to "Information that is passively collected by Unity" in the
+[Unity Privacy Policy](https://unity3d.com/legal/privacy-policy). (#4677)
+- The FoodCollector example environment now uses continuous actions for moving and
+discrete actions for shooting. (#4746)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- `ActionSpec.validate_action()` now enforces that `UnityEnvironment.set_action_for_agent()` receives a 1D `np.array`. (#4691)
+
+### Bug Fixes
+#### com.unity.ml-agents (C#)
+- Removed noisy warnings about API minor version mismatches in both the C# and python code. (#4688)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+
+
+## [1.6.0-preview] - 2020-11-18
+### Major Changes
+#### com.unity.ml-agents (C#)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+ - PyTorch trainers are now the default. See the
+ [installation docs](https://github.com/Unity-Technologies/ml-agents/blob/release_10_docs/docs/Installation.md) for
+ more information on installing PyTorch. For the time being, TensorFlow is still available;
+ you can use the TensorFlow backend by adding `--tensorflow` to the CLI, or
+ adding `framework: tensorflow` in the configuration YAML. (#4517)
+
+### Minor Changes
+#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
+- The Barracuda dependency was upgraded to 1.1.2 (#4571)
+- Utilities were added to `com.unity.ml-agents.extensions` to make it easier to
+integrate with match-3 games. See the [readme](https://github.com/Unity-Technologies/ml-agents/blob/release_10_docs/com.unity.ml-agents.extensions/Documentation~/Match3.md)
+for more details. (#4515)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- The `action_probs` node is no longer listed as an output in TensorFlow models (#4613).
+
+### Bug Fixes
+#### com.unity.ml-agents (C#)
+- `Agent.CollectObservations()` and `Agent.EndEpisode()` will now throw an exception
+if they are called recursively (for example, if they call `Agent.EndEpisode()`).
+Previously, this would result in an infinite loop and cause the editor to hang. (#4573)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Fixed an issue where runs could not be resumed when using TensorFlow and Ghost Training. (#4593)
+- Changed the tensor type of the step count from int32 to int64 to address an overflow issue when the step
+count grows larger than 2^31. Previous TensorFlow checkpoints will become incompatible and cannot be loaded. (#4607)
+- Remove extra period after "Training" in console log. (#4674)
+
+
+## [1.5.0-preview] - 2020-10-14
+### Major Changes
+#### com.unity.ml-agents (C#)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+ - Added the Random Network Distillation (RND) intrinsic reward signal to the PyTorch
+ trainers. To use RND, add a `rnd` section to the `reward_signals` section of your
+ yaml configuration file. [More information here](https://github.com/Unity-Technologies/ml-agents/blob/release_9_docs/docs/Training-Configuration-File.md#rnd-intrinsic-reward) (#4473)
+### Minor Changes
+#### com.unity.ml-agents (C#)
+ - Stacking for compressed observations is now supported. An additional setting
+ option `Observation Stacks` is added in editor to sensor components that support
+ compressed observations. A new class `ISparseChannelSensor` with an
+ additional method `GetCompressedChannelMapping()` is added to generate a mapping
+ from the channels in the compressed data to the actual channels after decompression,
+ so the Python side can decompress correctly. (#4476)
+ - Added a new visual 3DBall environment. (#4513)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+ - The Communication API was changed to 1.2.0 to indicate support for stacked
+ compressed observations. A new entry `compressed_channel_mapping` is added to the
+ proto to handle decompression correctly. Newer versions of the package that wish to
+ make use of this will also need a compatible version of the Python trainers. (#4476)
+ - In the `VisualFoodCollector` scene, a vector flag representing the frozen state of
+ the agent is added to the input observations in addition to the original first-person
+ camera frame. The scene is able to train with the provided default config file. (#4511)
+ - Added conversion to string for sampler classes to increase the verbosity of
+ the curriculum lesson changes. The lesson updates now output the sampler
+ stats in addition to the lesson and parameter name to the console. (#4484)
+ - Localized documentation in Russian is added. Thanks to @SergeyMatrosov for
+ the contribution. (#4529)
+### Bug Fixes
+#### com.unity.ml-agents (C#)
+ - Fixed a bug where accessing the Academy outside of play mode would cause the
+ Academy to get stepped multiple times when in play mode. (#4532)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+
+
+## [1.4.0-preview] - 2020-09-16
+### Major Changes
+#### com.unity.ml-agents (C#)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+
+### Minor Changes
+#### com.unity.ml-agents (C#)
+- The `IActuator` interface and `ActuatorComponent` abstract class were added.
+These are analogous to `ISensor` and `SensorComponent`, but for applying actions
+for an Agent. They allow you to control the action space more programmatically
+than defining the actions in the Agent's Behavior Parameters. See
+[BasicActuatorComponent.cs](https://github.com/Unity-Technologies/ml-agents/blob/release_7_docs/Project/Assets/ML-Agents/Examples/Basic/Scripts/BasicActuatorComponent.cs)
+ for an example of how to use them. (#4297, #4315)
+- Update Barracuda to 1.1.1-preview (#4482)
+- Enabled C# formatting using `dotnet-format`. (#4362)
+- GridSensor was added to the `com.unity.ml-agents.extensions` package. Thank you
+to Jaden Travnik from Eidos Montreal for the contribution! (#4399)
+- Added `Agent.EpisodeInterrupted()`, which can be used to reset the agent when
+it has reached a user-determined maximum number of steps. This behaves similarly
+to `Agent.EndEpisode()` but has a slightly different effect on training (#4453).
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Experimental PyTorch support has been added. Use `--torch` when running `mlagents-learn`, or add
+`framework: pytorch` to your trainer configuration (under the behavior name) to enable it.
+Note that PyTorch 1.6.0 or greater should be installed to use this feature; see
+[the PyTorch website](https://pytorch.org/) for installation instructions and
+[the relevant ML-Agents docs](https://github.com/Unity-Technologies/ml-agents/blob/release_7_docs/docs/Training-ML-Agents.md#using-pytorch-experimental) for usage. (#4335)
+- The minimum supported version of TensorFlow was increased to 1.14.0. (#4411)
+- Compressed visual observations with >3 channels are now supported. In
+`ISensor.GetCompressedObservation()`, this can be done by writing 3 channels at a
+time to a PNG and concatenating the resulting bytes. (#4399)
+- The Communication API was changed to 1.1.0 to indicate support for concatenated PNGs
+(see above). Newer versions of the package that wish to make use of this will also need
+a compatible version of the trainer. (#4462)
+- A CNN (`vis_encode_type: match3`) for smaller grids, e.g. board games, has been added.
+(#4434)
+- You can now again specify a default configuration for your behaviors. Specify `default_settings` in
+your trainer configuration to do so. (#4448)
+- Improved the executable detection logic for environments on Windows. (#4485)
+
+### Bug Fixes
+#### com.unity.ml-agents (C#)
+- Previously, `com.unity.ml-agents` was not declaring built-in packages as
+dependencies in its package.json. The relevant dependencies are now listed. (#4384)
+- Agents no longer try to send observations when they become disabled if the
+Academy has been shut down. (#4489)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Fixed the sample code in the custom SideChannel example. (#4466)
+- A bug in the observation normalizer that would cause rewards to decrease
+when using `--resume` was fixed. (#4463)
+- Fixed a bug in exporting PyTorch models when using multiple discrete actions. (#4491)
+
+## [1.3.0-preview] - 2020-08-12
+
+### Major Changes
+#### com.unity.ml-agents (C#)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- The minimum supported Python version for ml-agents-envs was changed to 3.6.1. (#4244)
+- The interaction between EnvManager and TrainerController was changed; EnvManager.advance() was split into two stages,
+and TrainerController now uses the results from the first stage to handle new behavior names. This change speeds up
+Python training by approximately 5-10%. (#4259)
+
+### Minor Changes
+#### com.unity.ml-agents (C#)
+- StatsSideChannel now stores multiple values per key. This means that multiple
+calls to `StatsRecorder.Add()` with the same key in the same step will no
+longer overwrite each other. (#4236)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- The versions of `numpy` supported by ml-agents-envs were changed to disallow 1.19.0 or later. This was done to reflect
+a similar change in TensorFlow's requirements. (#4274)
+- Model checkpoints are now also saved as .nn files during training. (#4127)
+- Model checkpoint info is saved in TrainingStatus.json after training is concluded. (#4127)
+- CSV statistics writer was removed. (#4300)
+
+### Bug Fixes
+#### com.unity.ml-agents (C#)
+- Academy.EnvironmentStep() will now throw an exception if it is called
+recursively (for example, by an Agent's CollectObservations method).
+Previously, this would result in an infinite loop and cause the editor to hang.
+(#4226)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- The algorithm used to normalize observations could introduce NaNs if the initial observations were too large
+due to incorrect initialization. The initialization was fixed and now uses the observation means from the
+first trajectory processed. (#4299)
+
+## [1.2.0-preview] - 2020-07-15
+
+### Major Changes
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- The Parameter Randomization feature has been refactored to enable sampling of new parameters per episode to improve robustness. The
+ `resampling-interval` parameter has been removed and the config structure updated. More information [here](https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Training-ML-Agents.md). (#4065)
+- The Parameter Randomization feature has been merged with the Curriculum feature. It is now possible to specify a sampler
+in the lesson of a Curriculum. Curriculum has been refactored and is now specified at the level of the parameter, not the
+behavior. More information
+[here](https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Training-ML-Agents.md). (#4160)
+
+### Minor Changes
+#### com.unity.ml-agents (C#)
+- `SideChannelsManager` was renamed to `SideChannelManager`. The old name is still supported, but deprecated. (#4137)
+- `RayPerceptionSensor.Perceive()` now additionally stores the GameObject that was hit by the ray. (#4111)
+- The Barracuda dependency was upgraded to 1.0.1 (#4188)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Added new Google Colab notebooks to show how to use `UnityEnvironment`. (#4117)
+
+### Bug Fixes
+#### com.unity.ml-agents (C#)
+- Fixed an issue where RayPerceptionSensor would raise an exception when the
+list of tags was empty, or a tag in the list was invalid (unknown, null, or
+empty string). (#4155)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Fixed an error when setting `initialize_from` in the trainer configuration YAML to
+`null`. (#4175)
+- Fixed issue with FoodCollector, Soccer, and WallJump when playing with keyboard. (#4147, #4174)
+- Fixed a crash in StatsReporter when using threaded trainers with very frequent summary writes
+(#4201)
+- `mlagents-learn` will now raise an error immediately if `--num-envs` is greater than 1 without setting the `--env`
+argument. (#4203)
+
+## [1.1.0-preview] - 2020-06-10
+### Major Changes
+#### com.unity.ml-agents (C#)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Added new Walker environments. Improved ragdoll stability/performance. (#4037)
+- `max_step` in the `TerminalStep` and `TerminalSteps` objects was renamed `interrupted`.
+- `beta` and `epsilon` in `PPO` are no longer decayed by default but follow the same schedule as learning rate. (#3940)
+- `get_behavior_names()` and `get_behavior_spec()` on UnityEnvironment were replaced by the `behavior_specs` property (see the sketch after this list). (#3946)
+- The first version of the Unity Environment Registry (Experimental) has been released. More information [here](https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Unity-Environment-Registry.md). (#3967)
+- `use_visual` and `allow_multiple_visual_obs` in the `UnityToGymWrapper` constructor
+were replaced by `allow_multiple_obs`, which allows one or more visual observations and
+vector observations to be used simultaneously. (#3981) Thank you @shakenes!
+- Curriculum and Parameter Randomization configurations have been merged
+ into the main training configuration file. Note that this means training
+ configuration files are now environment-specific. (#3791)
+- The format for trainer configuration has changed, and the "default" behavior has been deprecated.
+ See the [Migration Guide](https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Migrating.md) for more details. (#3936)
+- Training artifacts (trained models, summaries) are now found in the `results/`
+ directory. (#3829)
+- When using Curriculum, the current lesson will resume if training is quit and resumed. As such,
+ the `--lesson` CLI option has been removed. (#4025)
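+
+As referenced above, a minimal sketch of the `behavior_specs` property, assuming a scene is playing in the Editor (using the era-appropriate `observation_shapes` field):
+
+```python
+from mlagents_envs.environment import UnityEnvironment
+
+env = UnityEnvironment(file_name=None)  # connect to the Editor
+env.reset()
+# behavior_specs replaces get_behavior_names() / get_behavior_spec()
+for behavior_name, spec in env.behavior_specs.items():
+    print(behavior_name, spec.observation_shapes)
+env.close()
+```
+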
+### Minor Changes
+#### com.unity.ml-agents (C#)
+- `ObservableAttribute` was added. Adding the attribute to fields or properties on an Agent will allow it to generate
+ observations via reflection. (#3925, #4006)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- Unity Player logs are now written out to the results directory. (#3877)
+- Run configuration YAML files are written out to the results directory at the end of the run. (#3815)
+- The `--save-freq` CLI option has been removed, and replaced by a `checkpoint_interval` option in the trainer configuration YAML. (#4034)
+- When trying to load/resume from a checkpoint created with an earlier version of ML-Agents,
+ a warning will be thrown. (#4035)
+### Bug Fixes
+- Fixed an issue where SAC would perform too many model updates when resuming from a
+ checkpoint, and too few when using `buffer_init_steps`. (#4038)
+- Fixed a bug in the onnx export that would cause constants needed for inference to not be visible to some versions of
+ the Barracuda importer. (#4073)
+#### com.unity.ml-agents (C#)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+
+
+## [1.0.2-preview] - 2020-05-20
+### Bug Fixes
+#### com.unity.ml-agents (C#)
+- Fix missing .meta file
+
+
+## [1.0.1-preview] - 2020-05-19
+### Bug Fixes
+#### com.unity.ml-agents (C#)
+- A bug that would cause the editor to go into a loop when a prefab was selected was fixed. (#3949)
+- BrainParameters.ToProto() no longer throws an exception if none of the fields have been set. (#3930)
+- The Barracuda dependency was upgraded to 0.7.1-preview. (#3977)
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+- An issue was fixed where using `--initialize-from` would resume from the past step count. (#3962)
+- The gym wrapper error for the wrong number of agents now fires more consistently, and more details
+ were added to the error message when the input dimension is wrong. (#3963)
+
+
+## [1.0.0-preview] - 2020-04-30
+### Major Changes
+#### com.unity.ml-agents (C#)
+
+- The `MLAgents` C# namespace was renamed to `Unity.MLAgents`, and other nested
+ namespaces were similarly renamed. (#3843)
+- The offset logic was removed from DecisionRequester. (#3716)
+- The signature of `Agent.Heuristic()` was changed to take a float array as a
+ parameter, instead of returning the array. This was done to prevent a common
+ source of error where users would return arrays of the wrong size. (#3765)
+- The communication API version has been bumped up to 1.0.0 and will use
+ [Semantic Versioning](https://semver.org/) to do compatibility checks for
+ communication between Unity and the Python process. (#3760)
+- The obsolete `Agent` methods `GiveModel`, `Done`, `InitializeAgent`,
+ `AgentAction` and `AgentReset` have been removed. (#3770)
+- The SideChannel API has changed:
+ - Introduced the `SideChannelManager` to register, unregister and access side
+ channels. (#3807)
+ - `Academy.FloatProperties` was replaced by `Academy.EnvironmentParameters`.
+ See the [Migration Guide](https://github.com/Unity-Technologies/ml-agents/blob/release_1_docs/docs/Migrating.md)
+ for more details on upgrading. (#3807)
+ - `SideChannel.OnMessageReceived` is now a protected method (was public)
+ - SideChannel IncomingMessages methods now take an optional default argument,
+ which is used when trying to read more data than the message contains. (#3751)
+ - Added a feature to allow sending stats from C# environments to TensorBoard
+ (and other python StatsWriters). To do this from your code, use
+ `Academy.Instance.StatsRecorder.Add(key, value)`. (#3660)
+- `CameraSensorComponent.m_Grayscale` and
+ `RenderTextureSensorComponent.m_Grayscale` were changed from `public` to
+ `private`. These can still be accessed via their corresponding properties.
+ (#3808)
+- Public fields and properties on several classes were renamed to follow Unity's
+ C# style conventions. All public fields and properties now use "PascalCase"
+ instead of "camelCase"; for example, `Agent.maxStep` was renamed to
+ `Agent.MaxStep`. For a full list of changes, see the pull request. (#3828)
+- `WriteAdapter` was renamed to `ObservationWriter`. If you have a custom
+ `ISensor` implementation, you will need to change the signature of its
+ `Write()` method. (#3834)
+- The Barracuda dependency was upgraded to 0.7.0-preview (which has breaking
+ namespace and assembly name changes). (#3875)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+
+- The `--load` and `--train` command-line flags have been deprecated. Training
+  now happens by default; use `--resume` to resume training instead of
+  `--load`. (#3705)
+- The Jupyter notebooks have been removed from the repository. (#3704)
+- The multi-agent gym option was removed from the gym wrapper. For multi-agent
+ scenarios, use the [Low Level Python API](https://github.com/Unity-Technologies/ml-agents/blob/release_1_docs/docs/Python-API.md). (#3681)
+- The low level Python API has changed. See the
+  [Low Level Python API](https://github.com/Unity-Technologies/ml-agents/blob/release_1_docs/docs/Python-API.md)
+  documentation for more information. If you use `mlagents-learn` for training, this should be a
+  transparent change. (#3681)
+- Added ability to start training (initialize model weights) from a previous run
+ ID. (#3710)
+- The GhostTrainer has been extended to support asymmetric games and the
+ asymmetric example environment Strikers Vs. Goalie has been added. (#3653)
+- The `UnityEnv` class from the `gym-unity` package was renamed
+ `UnityToGymWrapper` and no longer creates the `UnityEnvironment`. Instead, the
+ `UnityEnvironment` must be passed as input to the constructor of
+ `UnityToGymWrapper` (#3812)
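+
+  A minimal sketch of the new construction pattern (the build path is hypothetical):
+
+  ```python
+  from mlagents_envs.environment import UnityEnvironment
+  from gym_unity.envs import UnityToGymWrapper
+
+  # the wrapper no longer creates the environment; pass one in explicitly
+  unity_env = UnityEnvironment("builds/MyEnv")  # hypothetical build path
+  env = UnityToGymWrapper(unity_env)
+  obs = env.reset()
+  obs, reward, done, info = env.step(env.action_space.sample())
+  env.close()
+  ```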
+
+### Minor Changes
+
+#### com.unity.ml-agents (C#)
+
+- Added new 3-joint Worm ragdoll environment. (#3798)
+- `StackingSensor` was changed from `internal` visibility to `public`. (#3701)
+- The internal event `Academy.AgentSetStatus` was renamed to
+ `Academy.AgentPreStep` and made public. (#3716)
+- Academy.InferenceSeed property was added. This is used to initialize the
+ random number generator in ModelRunner, and is incremented for each
+ ModelRunner. (#3823)
+- `Agent.GetObservations()` was added, which returns a read-only view of the
+ observations added in `CollectObservations()`. (#3825)
+- `UnityRLCapabilities` was added to help inform users when RL features are
+ mismatched between C# and Python packages. (#3831)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+
+- Format of console output has changed slightly and now matches the name of the
+ model/summary directory. (#3630, #3616)
+- Renamed 'Generalization' feature to 'Environment Parameter Randomization'.
+ (#3646)
+- Timer files now contain a dictionary of metadata, including things like the
+ package version numbers. (#3758)
+- The way that UnityEnvironment decides the port was changed. If no port is
+  specified, the behavior will depend on the `file_name` parameter. If it is
+  `None`, 5004 (the editor port) will be used; otherwise 5005 (the base
+  environment port) will be used. A sketch of the two cases follows this list. (#3673)
+- Running `mlagents-learn` with the same `--run-id` twice will no longer
+ overwrite the existing files. (#3705)
+- Model updates can now happen asynchronously with environment steps for better
+ performance. (#3690)
+- `num_updates` and `train_interval` for SAC were replaced with
+ `steps_per_update`. (#3690)
+- The maximum compatible version of tensorflow was changed to allow tensorflow
+ 2.1 and 2.2. This will allow use with python 3.8 using tensorflow 2.2.0rc3.
+ (#3830)
+- `mlagents-learn` will no longer set the width and height of the executable
+ window to 84x84 when no width nor height arguments are given. (#3867)
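+
+As mentioned in the bullet on port selection above, a minimal sketch of the two cases (the build path is hypothetical):
+
+```python
+from mlagents_envs.environment import UnityEnvironment
+
+# file_name=None: connect to a scene playing in the Editor (port 5004)
+editor_env = UnityEnvironment(file_name=None)
+
+# a build path: launch the executable on the base environment port (5005)
+build_env = UnityEnvironment(file_name="builds/MyEnv")  # hypothetical path
+```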
+
+### Bug Fixes
+
+#### com.unity.ml-agents (C#)
+
+- Fixed a display bug when viewing Demonstration files in the inspector. The
+ shapes of the observations in the file now display correctly. (#3771)
+
+#### ml-agents / ml-agents-envs / gym-unity (Python)
+
+- Fixed an issue where exceptions from environments provided a return code of 0.
+ (#3680)
+- Self-Play team changes will now trigger a full environment reset. This
+ prevents trajectories in progress during a team change from getting into the
+ buffer. (#3870)
+
+## [0.15.1-preview] - 2020-03-30
+
+### Bug Fixes
+
+- Raise the wall in CrawlerStatic scene to prevent Agent from falling off.
+ (#3650)
+- Fixed an issue where specifying `vis_encode_type` was required only for SAC.
+ (#3677)
+- Fixed the reported entropy values for continuous actions (#3684)
+- Fixed an issue where switching models using `SetModel()` during training would
+ use an excessive amount of memory. (#3664)
+- Environment subprocesses now close immediately on timeout or wrong API
+ version. (#3679)
+- Fixed an issue in the gym wrapper that would raise an exception if an Agent
+ called EndEpisode multiple times in the same step. (#3700)
+- Fixed an issue where logging output was not visible; logging levels are now
+ set consistently. (#3703)
+
+## [0.15.0-preview] - 2020-03-18
+
+### Major Changes
+
+- `Agent.CollectObservations` now takes a VectorSensor argument. (#3352, #3389)
+- Added `Agent.CollectDiscreteActionMasks` virtual method with a
+ `DiscreteActionMasker` argument to specify which discrete actions are
+ unavailable to the Agent. (#3525)
+- Beta support for ONNX export was added. If the `tf2onnx` python package is
+  installed, models will be saved to `.onnx` as well as `.nn` format. Note that
+  Barracuda 0.6.0 or later is required to import the `.onnx` files properly.
+- Multi-GPU training and the `--multi-gpu` option has been removed temporarily.
+ (#3345)
+- All Sensor related code has been moved to the namespace `MLAgents.Sensors`.
+- All SideChannel related code has been moved to the namespace
+ `MLAgents.SideChannels`.
+- `BrainParameters` and `SpaceType` have been removed from the public API
+- `BehaviorParameters` have been removed from the public API.
+- The following methods in the `Agent` class have been deprecated and will be
+ removed in a later release:
+ - `InitializeAgent()` was renamed to `Initialize()`
+ - `AgentAction()` was renamed to `OnActionReceived()`
+ - `AgentReset()` was renamed to `OnEpisodeBegin()`
+ - `Done()` was renamed to `EndEpisode()`
+ - `GiveModel()` was renamed to `SetModel()`
+
+### Minor Changes
+
+- Monitor.cs was moved to Examples. (#3372)
+- Automatic stepping for Academy is now controlled from the
+ AutomaticSteppingEnabled property. (#3376)
+- The GetEpisodeCount, GetStepCount, GetTotalStepCount and methods of Academy
+ were changed to EpisodeCount, StepCount, TotalStepCount properties
+ respectively. (#3376)
+- Several classes were changed from public to internal visibility. (#3390)
+- Academy.RegisterSideChannel and UnregisterSideChannel methods were added.
+ (#3391)
+- A tutorial on adding custom SideChannels was added (#3391)
+- The stepping logic for the Agent and the Academy has been simplified (#3448)
+- Update Barracuda to 0.6.1-preview
+- The interface for `RayPerceptionSensor.PerceiveStatic()` was changed to take
+  an input class and write to an output class, and the method was renamed to
+  `Perceive()`.
+- The checkpoint file suffix was changed from `.cptk` to `.ckpt` (#3470)
+- The command-line argument used to determine the port that an environment will
+ listen on was changed from `--port` to `--mlagents-port`.
+- `DemonstrationRecorder` can now record observations outside of the editor.
+- `DemonstrationRecorder` now has an optional path for the demonstrations. This
+ will default to `Application.dataPath` if not set.
+- `DemonstrationStore` was changed to accept a `Stream` for its constructor, and
+ was renamed to `DemonstrationWriter`
+- The method `GetStepCount()` on the Agent class has been replaced with the
+ property getter `StepCount`
+- `RayPerceptionSensorComponent` and related classes now display the debug
+ gizmos whenever the Agent is selected (not just Play mode).
+- Most fields on `RayPerceptionSensorComponent` can now be changed while the
+ editor is in Play mode. The exceptions to this are fields that affect the
+ number of observations.
+- Most fields on `CameraSensorComponent` and `RenderTextureSensorComponent` were
+ changed to private and replaced by properties with the same name.
+- Unused static methods from the `Utilities` class (ShiftLeft, ReplaceRange,
+ AddRangeNoAlloc, and GetSensorFloatObservationSize) were removed.
+- The `Agent` class is no longer abstract.
+- SensorBase was moved out of the package and into the Examples directory.
+- `AgentInfo.actionMasks` has been renamed to `AgentInfo.discreteActionMasks`.
+- `DecisionRequester` has been made internal (you can still use the
+ DecisionRequesterComponent from the inspector). `RepeatAction` was renamed
+ `TakeActionsBetweenDecisions` for clarity. (#3555)
+- The `IFloatProperties` interface has been removed.
+- Fix #3579.
+- Improved inference performance for models with multiple action branches.
+ (#3598)
+- Fixed an issue when using GAIL with less than `batch_size` number of
+ demonstrations. (#3591)
+- The interfaces to the `SideChannel` classes (on C# and python) have changed to
+  use new `IncomingMessage` and `OutgoingMessage` classes. These should make
+  reading and writing data to the channel easier (a minimal Python sketch follows this list). (#3596)
+- Updated the ExpertPyramid.demo example demonstration file (#3613)
+- Updated project version for example environments to 2018.4.18f1. (#3618)
+- Changed the Product Name in the example environments to remove spaces, so that
+ the default build executable file doesn't contain spaces. (#3612)
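+
+As referenced in the `SideChannel` bullet above, a minimal sketch of a custom channel on the Python side; the channel UUID is a placeholder and must match the id registered on the C# side:
+
+```python
+import uuid
+
+from mlagents_envs.side_channel.side_channel import (
+    IncomingMessage,
+    OutgoingMessage,
+    SideChannel,
+)
+
+
+class StringLogChannel(SideChannel):
+    def __init__(self) -> None:
+        # placeholder UUID; must match the channel id used in C#
+        super().__init__(uuid.UUID("621f0a70-4f87-11ea-a6bf-784f4387d1f7"))
+
+    def on_message_received(self, msg: IncomingMessage) -> None:
+        # called whenever the C# side sends a message on this channel
+        print(msg.read_string())
+
+    def send_string(self, data: str) -> None:
+        # queue a string to be sent to C# with the next environment step
+        msg = OutgoingMessage()
+        msg.write_string(data)
+        super().queue_message_to_send(msg)
+```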
+
+## [0.14.1-preview] - 2020-02-25
+
+### Bug Fixes
+
+- Fixed an issue which caused self-play training sessions to consume a lot of
+ memory. (#3451)
+- Fixed an IndexError when using GAIL or behavioral cloning with demonstrations
+ recorded with 0.14.0 or later (#3464)
+- Updated the `gail_config.yaml` to work with per-Agent steps (#3475)
+- Fixed demonstration recording of experiences when the Agent is done. (#3463)
+- Fixed a bug with the rewards of multiple Agents in the gym interface (#3471,
+ #3496)
+
+## [0.14.0-preview] - 2020-02-13
+
+### Major Changes
+
+- A new self-play mechanism for training agents in adversarial scenarios was
+ added (#3194)
+- Tennis and Soccer environments were refactored to enable training with
+ self-play (#3194, #3331)
+- UnitySDK folder was split into a Unity Package (com.unity.ml-agents) and our
+ examples were moved to the Project folder (#3267)
+- Academy is now a singleton and is no longer abstract (#3210, #3184)
+- In order to reduce the size of the API, several classes and methods were
+ marked as internal or private. Some public fields on the Agent were trimmed
+ (#3342, #3353, #3269)
+- Decision Period and on-demand decision checkboxes were removed from the Agent.
+  On-demand decision is now the default (#3243)
+- Calling Done() on the Agent will reset it immediately and call the AgentReset
+ virtual method (#3291, #3242)
+- The "Reset on Done" setting in AgentParameters was removed; this is now always
+ true. AgentOnDone virtual method on the Agent was removed (#3311, #3222)
+- Trainer steps are now counted per-Agent, not per-environment as in previous
+ versions. For instance, if you have 10 Agents in the scene, 20 environment
+ steps now correspond to 200 steps as printed in the terminal and in
+ Tensorboard (#3113)
+
+### Minor Changes
+
+- Barracuda was updated to 0.5.0-preview (#3329)
+- --num-runs option was removed from mlagents-learn (#3155)
+- Curriculum config files are now YAML formatted and all curricula for a
+ training run are combined into a single file (#3186)
+- ML-Agents components, such as BehaviorParameters and various Sensor
+ implementations, now appear in the Components menu (#3231)
+- Exceptions are now raised in Unity (in debug mode only) if NaN observations or
+ rewards are passed (#3221)
+- RayPerception MonoBehavior, which was previously deprecated, was removed
+ (#3304)
+- Uncompressed visual (i.e. 3d float arrays) observations are now supported.
+ CameraSensorComponent and RenderTextureSensor now have an option to write
+ uncompressed observations (#3148)
+- Agent’s handling of observations during training was improved so that an extra
+ copy of the observations is no longer maintained (#3229)
+- Error message for missing trainer config files was improved to include the
+ absolute path (#3230)
+- Support for 2017.4 LTS was dropped (#3121, #3168)
+- Some documentation improvements were made (#3296, #3292, #3295, #3281)
+
+### Bug Fixes
+
+- Fixed a numpy warning shown when stats don’t exist (#3251)
+- A bug that caused RayPerceptionSensor to behave inconsistently with transforms
+ that have non-1 scale was fixed (#3321)
+- Some small bugfixes to tensorflow_to_barracuda.py were backported from the
+ barracuda release (#3341)
+- Base port in the jupyter notebook example was updated to use the same port
+ that the editor uses (#3283)
+
+## [0.13.0-preview] - 2020-01-24
+
+### This is the first release of _Unity Package ML-Agents_.
+
+_Short description of this release_
diff --git a/com.unity.ml-agents/CHANGELOG.md.meta b/com.unity.ml-agents/CHANGELOG.md.meta
new file mode 100644
index 0000000000..6331df01c1
--- /dev/null
+++ b/com.unity.ml-agents/CHANGELOG.md.meta
@@ -0,0 +1,7 @@
+fileFormatVersion: 2
+guid: e19737407870a49abaaa1a90dae1a334
+TextScriptImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/CODEOWNERS b/com.unity.ml-agents/CODEOWNERS
new file mode 100644
index 0000000000..52cb7ada8f
--- /dev/null
+++ b/com.unity.ml-agents/CODEOWNERS
@@ -0,0 +1,2 @@
+# see https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners for more information
+* @unity/behavior-authoring
\ No newline at end of file
diff --git a/com.unity.ml-agents/CODEOWNERS.meta b/com.unity.ml-agents/CODEOWNERS.meta
new file mode 100644
index 0000000000..f288a23537
--- /dev/null
+++ b/com.unity.ml-agents/CODEOWNERS.meta
@@ -0,0 +1,7 @@
+fileFormatVersion: 2
+guid: 5de323c2110f44676ba99dc49409363c
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/CONTRIBUTING.md b/com.unity.ml-agents/CONTRIBUTING.md
new file mode 100644
index 0000000000..65b01e775b
--- /dev/null
+++ b/com.unity.ml-agents/CONTRIBUTING.md
@@ -0,0 +1,97 @@
+# Contribution Guidelines
+
+Thank you for your interest in contributing to the ML-Agents Toolkit! We are
+incredibly excited to see how members of our community will use and extend the
+ML-Agents Toolkit. To facilitate your contributions, we've outlined a brief set
+of guidelines to ensure that your extensions can be easily integrated.
+
+## Communication
+
+First, please read through our
+[code of conduct](https://github.com/Unity-Technologies/ml-agents/blob/main/CODE_OF_CONDUCT.md),
+as we expect all our contributors to follow it.
+
+Second, before starting on a project that you intend to contribute to the
+ML-Agents Toolkit (whether environments or modifications to the codebase), we
+**strongly** recommend posting on our
+[Issues page](https://github.com/Unity-Technologies/ml-agents/issues) and
+briefly outlining the changes you plan to make. This will enable us to provide
+some context that may be helpful for you. This could range from advice and
+feedback on how to optimally perform your changes to reasons for not doing it.
+
+Lastly, if you're looking for input on what to contribute, feel free to reach
+out to us directly at ml-agents@unity3d.com and/or browse the GitHub issues with
+the `Requests` or `Bug` label.
+
+## Git Branches
+
+The main branch corresponds to the most recent version of the project. Note
+that this may be newer than the
+[latest release](https://github.com/Unity-Technologies/ml-agents/releases/tag/latest_release).
+
+When contributing to the project, please make sure that your Pull Request (PR)
+contains the following:
+
+- Detailed description of the changes performed
+- Corresponding changes to documentation, unit tests and sample environments (if
+ applicable)
+- Summary of the tests performed to validate your changes
+- Issue numbers that the PR resolves (if any)
+
+## Environments
+
+We are currently not accepting environment contributions directly into ML-Agents.
+However, we believe community-created environments have a lot of value to the
+community. If you have an interesting environment and are willing to share,
+feel free to showcase it and share any relevant files in the
+[ML-Agents forum](https://forum.unity.com/forums/ml-agents.453/).
+
+## Continuous Integration (CI)
+
+We run continuous integration on all PRs; all tests must be passing before the PR is merged.
+
+Several static checks are run on the codebase using the
+[pre-commit framework](https://pre-commit.com/) during CI. To execute the same
+checks locally, run:
+```bash
+pip install "pre-commit>=2.8.0"
+pip install "identify>=2.1.3"
+pre-commit run --all-files
+```
+
+Some hooks (for example, `black`) will output the corrected version of the code;
+others (like `mypy`) may require more effort to fix. You can optionally run
+`pre-commit install` to install it as a git hook; after this it will run on all
+commits that you make.
+
+### Code style
+
+All python code should be formatted with
+[`black`](https://github.com/psf/black).
+
+C# code is formatted using [`dotnet-format`](https://github.com/dotnet/format).
+You must have [dotnet](https://dotnet.microsoft.com/download) installed first
+(but don't need to install `dotnet-format` - `pre-commit` will do that for you).
+
+### Python type annotations
+
+We use [`mypy`](http://mypy-lang.org/) to perform static type checking on python
+code. Currently not all code is annotated but we will increase coverage over
+time. If you are adding or refactoring code, please
+
+1. Add type annotations to the new or refactored code.
+2. Make sure that code calling or called by the modified code also has type
+ annotations.
+
+The
+[type hint cheat sheet](https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html)
+provides a good introduction to adding type hints.
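+
+For example, a hypothetical helper annotated in the style `mypy` checks:
+
+```python
+from typing import Dict, List
+
+
+def mean_reward_by_behavior(rewards: Dict[str, List[float]]) -> Dict[str, float]:
+    """Annotate arguments and return types so mypy can check callers too."""
+    return {name: sum(values) / len(values) for name, values in rewards.items()}
+```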
+
+## Contributor License Agreements
+
+When you open a pull request, you will be asked to acknowledge our Contributor
+License Agreement. We allow both individual contributions and contributions made
+on behalf of companies. We use an open source tool called CLA assistant. If you
+have any questions on our CLA, please
+[submit an issue](https://github.com/Unity-Technologies/ml-agents/issues) or
+email us at ml-agents@unity3d.com.
diff --git a/com.unity.ml-agents/CONTRIBUTING.md.meta b/com.unity.ml-agents/CONTRIBUTING.md.meta
new file mode 100644
index 0000000000..acf109e975
--- /dev/null
+++ b/com.unity.ml-agents/CONTRIBUTING.md.meta
@@ -0,0 +1,7 @@
+fileFormatVersion: 2
+guid: 60b8c21afae8d449ebcdd512e85e97ac
+TextScriptImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Documentation~/com.unity.ml-agents.md b/com.unity.ml-agents/Documentation~/com.unity.ml-agents.md
new file mode 100644
index 0000000000..7e90deef39
--- /dev/null
+++ b/com.unity.ml-agents/Documentation~/com.unity.ml-agents.md
@@ -0,0 +1,161 @@
+# About ML-Agents package (`com.unity.ml-agents`)
+
+The _ML-Agents_ package contains the primary C# SDK for the [Unity ML-Agents
+Toolkit].
+
+The package allows you to convert any Unity scene into a learning environment
+and train character behaviors using a variety of machine learning algorithms.
+Additionally, it allows you to embed these trained behaviors back into Unity
+scenes to control your characters. More specifically, the package provides the
+following core functionalities:
+
+- Define Agents: entities, or characters, whose behavior will be learned. Agents
+  are entities that generate observations (through sensors), take actions, and
+  receive rewards from the environment (see the sketch after this list).
+- Define Behaviors: entities that specify how an agent should act. Multiple
+ agents can share the same Behavior and a scene may have multiple Behaviors.
+- Record demonstrations of an agent within the Editor. You can use
+ demonstrations to help train a behavior for that agent.
+- Embed a trained behavior into the scene via the [Unity Inference Engine].
+  Embedded behaviors allow you to switch an Agent between learning and
+  inference.
+
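+As a rough sketch of what the first two bullets look like in code, here is a
+minimal Agent subclass (the class name, observation, and reward logic are
+illustrative placeholders, not part of the package API):
+
+```csharp
+using Unity.MLAgents;
+using Unity.MLAgents.Actuators;
+using Unity.MLAgents.Sensors;
+
+public class ExampleAgent : Agent
+{
+    // Observations are generated each step through sensors.
+    public override void CollectObservations(VectorSensor sensor)
+    {
+        sensor.AddObservation(transform.position);
+    }
+
+    // Actions arrive from the Behavior; rewards shape what gets learned.
+    public override void OnActionReceived(ActionBuffers actions)
+    {
+        var move = actions.ContinuousActions[0];
+        AddReward(move > 0f ? 0.01f : 0f);
+    }
+}
+```
+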
+Note that the _ML-Agents_ package does not contain the machine learning
+algorithms for training behaviors. It only supports instrumenting a Unity
+scene, setting it up for training, and then embedding the trained model back
+into your Unity scene. The machine learning algorithms that orchestrate
+training are part of the companion [Python package].
+
+We also provide an _ML-Agents Extensions_ package
+(`com.unity.ml-agents.extensions`) that contains early/experimental features
+that you may find useful. This package is only available from the [ML-Agents
+GitHub repo].
+
+## Package contents
+
+The following table describes the package folder structure:
+
+| **Location** | **Description** |
+| ---------------------- | ----------------------------------------------------------------------- |
+| _Documentation~_ | Contains the documentation for the Unity package. |
+| _Editor_ | Contains utilities for Editor windows and drawers. |
+| _Plugins_ | Contains third-party DLLs. |
+| _Runtime_ | Contains core C# APIs for integrating ML-Agents into your Unity scene. |
+| _Runtime/Integrations_ | Contains utilities for integrating ML-Agents into specific game genres. |
+| _Tests_ | Contains the unit tests for the package. |
+
+
+## Installation
+
+To install this _ML-Agents_ package, follow the instructions in the [Package
+Manager documentation].
+
+To install the companion Python package to enable training behaviors, follow the
+[installation instructions] on our [GitHub repository].
+
+### Advanced Installation
+With the changes to Unity Package Manager in 2021, experimental packages will not show up in the package list and have to be installed manually. There are two recommended ways to install the package manually:
+
+#### GitHub via Package Manager
+
+In Unity 2019.4 or later, open the Package Manager, hit the "+" button, and select "Add package from git URL".
+
+In the dialog that appears, enter:
+```
+git+https://github.com/Unity-Technologies/ml-agents.git?path=com.unity.ml-agents#release_19
+```
+
+You can also edit your project's `manifest.json` directly and add the following line to the `dependencies`
+section:
+```
+"com.unity.ml-agents": "git+https://github.com/Unity-Technologies/ml-agents.git?path=com.unity.ml-agents#release_19",
+```
+See [Git dependencies](https://docs.unity3d.com/Manual/upm-git.html#subfolder) for more information. Note that it
+may take several minutes for Package Manager to resolve the packages the first time you add the dependency.
+
+#### Local Installation for Development
+
+[Clone the repository](https://github.com/Unity-Technologies/ml-agents/tree/release_19_docs/docs/Installation.md#clone-the-ml-agents-toolkit-repository-optional) and follow the
+[Local Installation for Development](https://github.com/Unity-Technologies/ml-agents/tree/release_19_docs/docs/Installation.md#advanced-local-installation-for-development-1)
+directions.
+
+## Requirements
+
+This version of the Unity ML-Agents package is compatible with the following
+versions of the Unity Editor:
+
+- 2019.4 and later
+
+## Known Limitations
+
+### Training
+
+Training is limited to the Unity Editor and Standalone builds on Windows, macOS,
+and Linux with the Mono scripting backend. Currently, training does not work
+with the IL2CPP scripting backend. Your environment will default to inference
+mode if training is not supported or is not currently running.
+
+### Inference
+
+Inference is executed via the
+[Unity Inference Engine](https://docs.unity3d.com/Packages/com.unity.barracuda@latest/index.html).
+
+**CPU**
+
+All platforms supported.
+
+**GPU**
+
+All platforms supported except:
+
+- WebGL and GLES 3/2 on Android / iOS
+
+**NOTE:** Mobile platform support includes:
+
+- Vulkan for Android
+- Metal for iOS
+
+### Headless Mode
+
+If you enable Headless mode, you will not be able to collect visual observations
+from your agents.
+
+### Rendering Speed and Synchronization
+
+Currently, the speed of the game physics can only be increased to 100x real-time.
+The Academy also moves in time with FixedUpdate() rather than Update(), so game
+behavior implemented in Update() may be out of sync with the agent
+decision-making. See [Execution Order of Event Functions] for more information.
+
+You can control the frequency of Academy stepping by calling
+`Academy.Instance.DisableAutomaticStepping()`, and then calling
+`Academy.Instance.EnvironmentStep()` yourself, as in the sketch below.
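+
+As a minimal sketch of manual stepping (the MonoBehaviour name and the
+per-FixedUpdate cadence are illustrative assumptions, not a prescribed
+pattern):
+
+```csharp
+using UnityEngine;
+using Unity.MLAgents;
+
+public class ManualAcademyStepper : MonoBehaviour
+{
+    void Awake()
+    {
+        // Take over stepping from the Academy.
+        Academy.Instance.DisableAutomaticStepping();
+    }
+
+    void FixedUpdate()
+    {
+        // Advance the Academy (and all registered Agents) once per physics step.
+        Academy.Instance.EnvironmentStep();
+    }
+}
+```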
+
+### Unity Inference Engine Models
+
+Currently, only models created with our trainers are supported for running
+ML-Agents with a neural network behavior.
+
+## Helpful links
+
+If you are new to the Unity ML-Agents package, or have a question after reading
+the documentation, you can check out our [GitHub Repository], which also
+includes a number of ways to [connect with us], including our [ML-Agents Forum].
+
+To improve the developer experience for the Unity ML-Agents Toolkit, we have added in-editor analytics.
+Please refer to "Information that is passively collected by Unity" in the
+[Unity Privacy Policy](https://unity3d.com/legal/privacy-policy).
+
+[unity ML-Agents Toolkit]: https://github.com/Unity-Technologies/ml-agents
+[unity inference engine]: https://docs.unity3d.com/Packages/com.unity.barracuda@latest/index.html
+[package manager documentation]: https://docs.unity3d.com/Manual/upm-ui-install.html
+[installation instructions]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Installation.md
+[github repository]: https://github.com/Unity-Technologies/ml-agents
+[python package]: https://github.com/Unity-Technologies/ml-agents
+[execution order of event functions]: https://docs.unity3d.com/Manual/ExecutionOrder.html
+[connect with us]: https://github.com/Unity-Technologies/ml-agents#community-and-feedback
+[ml-agents forum]: https://forum.unity.com/forums/ml-agents.453/
+[ML-Agents GitHub repo]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/com.unity.ml-agents.extensions
diff --git a/com.unity.ml-agents/Documentation~/filter.yml b/com.unity.ml-agents/Documentation~/filter.yml
new file mode 100755
index 0000000000..ce144daf61
--- /dev/null
+++ b/com.unity.ml-agents/Documentation~/filter.yml
@@ -0,0 +1,14 @@
+apiRules:
+- exclude:
+ uidRegex: .*Test.*
+ type: Namespace
+- exclude:
+ uidRegex: ^Unity.MLAgents\.CommunicatorObjects$
+ type: Namespace
+- exclude:
+ uidRegex: ^Unity.MLAgents\.Editor$
+ type: Namespace
+- exclude:
+ uidRegex: ^Unity.MLAgentsExamples$
+ type: Namespace
+
diff --git a/com.unity.ml-agents/Editor.meta b/com.unity.ml-agents/Editor.meta
new file mode 100644
index 0000000000..89d980b088
--- /dev/null
+++ b/com.unity.ml-agents/Editor.meta
@@ -0,0 +1,8 @@
+fileFormatVersion: 2
+guid: e9de88a64ac5c4d2eb8955836199d61b
+folderAsset: yes
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/AgentEditor.cs b/com.unity.ml-agents/Editor/AgentEditor.cs
new file mode 100644
index 0000000000..fdebf1cb15
--- /dev/null
+++ b/com.unity.ml-agents/Editor/AgentEditor.cs
@@ -0,0 +1,31 @@
+using UnityEngine;
+using UnityEditor;
+
+namespace Unity.MLAgents.Editor
+{
+ /*
+ This code is meant to modify the behavior of the inspector on Agent Components.
+ */
+ [CustomEditor(typeof(Agent), true)]
+ [CanEditMultipleObjects]
+ internal class AgentEditor : UnityEditor.Editor
+ {
+ public override void OnInspectorGUI()
+ {
+ var serializedAgent = serializedObject;
+ serializedAgent.Update();
+
+ var maxSteps = serializedAgent.FindProperty("MaxStep");
+
+ EditorGUILayout.PropertyField(
+ maxSteps,
+ new GUIContent("Max Step", "The per-agent maximum number of steps.")
+ );
+
+ serializedAgent.ApplyModifiedProperties();
+
+ EditorGUILayout.LabelField("", GUI.skin.horizontalSlider);
+ base.OnInspectorGUI();
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/AgentEditor.cs.meta b/com.unity.ml-agents/Editor/AgentEditor.cs.meta
new file mode 100755
index 0000000000..66bc325f8b
--- /dev/null
+++ b/com.unity.ml-agents/Editor/AgentEditor.cs.meta
@@ -0,0 +1,12 @@
+fileFormatVersion: 2
+guid: c3b291e1cd0c64781861652b579d0ac1
+timeCreated: 1503270350
+licenseType: Free
+MonoImporter:
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/BehaviorParametersEditor.cs b/com.unity.ml-agents/Editor/BehaviorParametersEditor.cs
new file mode 100644
index 0000000000..a95b2846f3
--- /dev/null
+++ b/com.unity.ml-agents/Editor/BehaviorParametersEditor.cs
@@ -0,0 +1,192 @@
+using System.Collections.Generic;
+using UnityEditor;
+using Unity.Barracuda;
+using Unity.MLAgents.Actuators;
+using Unity.MLAgents.Policies;
+using Unity.MLAgents.Sensors;
+using Unity.MLAgents.Sensors.Reflection;
+using CheckTypeEnum = Unity.MLAgents.Inference.BarracudaModelParamLoader.FailedCheck.CheckTypeEnum;
+
+namespace Unity.MLAgents.Editor
+{
+ /*
+ This code is meant to modify the behavior of the inspector on Agent Components.
+ */
+ [CustomEditor(typeof(BehaviorParameters))]
+ [CanEditMultipleObjects]
+ internal class BehaviorParametersEditor : UnityEditor.Editor
+ {
+ const float k_TimeBetweenModelReloads = 2f;
+ // Time since the last reload of the model
+ float m_TimeSinceModelReload;
+ // Whether or not the model needs to be reloaded
+ bool m_RequireReload;
+ const string k_BehaviorName = "m_BehaviorName";
+ const string k_BrainParametersName = "m_BrainParameters";
+ const string k_ModelName = "m_Model";
+ const string k_InferenceDeviceName = "m_InferenceDevice";
+ const string k_DeterministicInference = "m_DeterministicInference";
+ const string k_BehaviorTypeName = "m_BehaviorType";
+ const string k_TeamIdName = "TeamId";
+ const string k_UseChildSensorsName = "m_UseChildSensors";
+ const string k_ObservableAttributeHandlingName = "m_ObservableAttributeHandling";
+
+ public override void OnInspectorGUI()
+ {
+ var so = serializedObject;
+ so.Update();
+ bool needPolicyUpdate; // Whether the name, model, inference device, or BehaviorType changed.
+
+ var behaviorParameters = (BehaviorParameters)target;
+            var agent = behaviorParameters.gameObject.GetComponent<Agent>();
+ if (agent == null)
+ {
+ EditorGUILayout.HelpBox(
+ "No Agent is associated with this Behavior Parameters. Attach an Agent to " +
+ "this GameObject to configure your Agent with these behavior parameters.",
+ MessageType.Warning);
+ }
+
+ // Drawing the Behavior Parameters
+ EditorGUI.indentLevel++;
+ EditorGUI.BeginChangeCheck(); // global
+
+ EditorGUI.BeginChangeCheck();
+ {
+ EditorGUILayout.PropertyField(so.FindProperty(k_BehaviorName));
+ }
+ needPolicyUpdate = EditorGUI.EndChangeCheck();
+
+ EditorGUI.BeginChangeCheck();
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ EditorGUILayout.PropertyField(so.FindProperty(k_BrainParametersName), true);
+ }
+ EditorGUI.EndDisabledGroup();
+
+ EditorGUI.BeginChangeCheck();
+ {
+ EditorGUILayout.PropertyField(so.FindProperty(k_ModelName), true);
+ EditorGUI.indentLevel++;
+ EditorGUILayout.PropertyField(so.FindProperty(k_InferenceDeviceName), true);
+ EditorGUILayout.PropertyField(so.FindProperty(k_DeterministicInference), true);
+ EditorGUI.indentLevel--;
+ }
+ needPolicyUpdate = needPolicyUpdate || EditorGUI.EndChangeCheck();
+
+ EditorGUI.BeginChangeCheck();
+ {
+ EditorGUILayout.PropertyField(so.FindProperty(k_BehaviorTypeName));
+ }
+ needPolicyUpdate = needPolicyUpdate || EditorGUI.EndChangeCheck();
+
+ EditorGUILayout.PropertyField(so.FindProperty(k_TeamIdName));
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ EditorGUILayout.PropertyField(so.FindProperty(k_UseChildSensorsName), true);
+ EditorGUILayout.PropertyField(so.FindProperty(k_ObservableAttributeHandlingName), true);
+ }
+ EditorGUI.EndDisabledGroup();
+
+ EditorGUI.indentLevel--;
+ m_RequireReload = EditorGUI.EndChangeCheck();
+ DisplayFailedModelChecks();
+ so.ApplyModifiedProperties();
+
+ if (needPolicyUpdate)
+ {
+ UpdateAgentPolicy();
+ }
+ }
+
+        /// <summary>
+        /// Must be called within OnEditorGUI()
+        /// </summary>
+ void DisplayFailedModelChecks()
+ {
+ if (m_RequireReload && m_TimeSinceModelReload > k_TimeBetweenModelReloads)
+ {
+ m_RequireReload = false;
+ m_TimeSinceModelReload = 0;
+ }
+ // Display all failed checks
+ D.logEnabled = false;
+ Model barracudaModel = null;
+ var model = (NNModel)serializedObject.FindProperty(k_ModelName).objectReferenceValue;
+ var behaviorParameters = (BehaviorParameters)target;
+
+ // Grab the sensor components, since we need them to determine the observation sizes.
+ // TODO make these methods of BehaviorParameters
+            var agent = behaviorParameters.gameObject.GetComponent<Agent>();
+ if (agent == null)
+ {
+ return;
+ }
+            agent.sensors = new List<ISensor>();
+ agent.InitializeSensors();
+ var sensors = agent.sensors.ToArray();
+
+ ActuatorComponent[] actuatorComponents;
+ if (behaviorParameters.UseChildActuators)
+ {
+                actuatorComponents = behaviorParameters.GetComponentsInChildren<ActuatorComponent>();
+ }
+ else
+ {
+                actuatorComponents = behaviorParameters.GetComponents<ActuatorComponent>();
+ }
+
+ // Get the total size of the sensors generated by ObservableAttributes.
+ // If there are any errors (e.g. unsupported type, write-only properties), display them too.
+ int observableAttributeSensorTotalSize = 0;
+ if (agent != null && behaviorParameters.ObservableAttributeHandling != ObservableAttributeOptions.Ignore)
+ {
+                List<string> observableErrors = new List<string>();
+ observableAttributeSensorTotalSize = ObservableAttribute.GetTotalObservationSize(agent, false, observableErrors);
+ foreach (var check in observableErrors)
+ {
+ EditorGUILayout.HelpBox(check, MessageType.Warning);
+ }
+ }
+
+ var brainParameters = behaviorParameters.BrainParameters;
+ if (model != null)
+ {
+ barracudaModel = ModelLoader.Load(model);
+ }
+ if (brainParameters != null)
+ {
+ var failedChecks = Inference.BarracudaModelParamLoader.CheckModel(
+ barracudaModel, brainParameters, sensors, actuatorComponents,
+ observableAttributeSensorTotalSize, behaviorParameters.BehaviorType, behaviorParameters.DeterministicInference
+ );
+ foreach (var check in failedChecks)
+ {
+ if (check != null)
+ {
+ switch (check.CheckType)
+ {
+ case CheckTypeEnum.Info:
+ EditorGUILayout.HelpBox(check.Message, MessageType.Info);
+ break;
+ case CheckTypeEnum.Warning:
+ EditorGUILayout.HelpBox(check.Message, MessageType.Warning);
+ break;
+ case CheckTypeEnum.Error:
+ EditorGUILayout.HelpBox(check.Message, MessageType.Error);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ void UpdateAgentPolicy()
+ {
+ var behaviorParameters = (BehaviorParameters)target;
+ behaviorParameters.UpdateAgentPolicy();
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/BehaviorParametersEditor.cs.meta b/com.unity.ml-agents/Editor/BehaviorParametersEditor.cs.meta
new file mode 100644
index 0000000000..6eb612f3e3
--- /dev/null
+++ b/com.unity.ml-agents/Editor/BehaviorParametersEditor.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 72b0b21a2d4ee4bc2be0530fd134720d
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/BrainParametersDrawer.cs b/com.unity.ml-agents/Editor/BrainParametersDrawer.cs
new file mode 100644
index 0000000000..52f40e20d3
--- /dev/null
+++ b/com.unity.ml-agents/Editor/BrainParametersDrawer.cs
@@ -0,0 +1,172 @@
+using UnityEngine;
+using UnityEditor;
+using Unity.MLAgents.Policies;
+
+namespace Unity.MLAgents.Editor
+{
+    /// <summary>
+    /// PropertyDrawer for BrainParameters. Defines how BrainParameters are displayed in the
+    /// Inspector.
+    /// </summary>
+ [CustomPropertyDrawer(typeof(BrainParameters))]
+ internal class BrainParametersDrawer : PropertyDrawer
+ {
+ // The height of a line in the Unity Inspectors
+ const float k_LineHeight = 17f;
+ const int k_VecObsNumLine = 3;
+ const string k_ActionSpecName = "m_ActionSpec";
+ const string k_ContinuousActionSizeName = "m_NumContinuousActions";
+ const string k_DiscreteBranchSizeName = "BranchSizes";
+ const string k_ActionDescriptionPropName = "VectorActionDescriptions";
+ const string k_VecObsPropName = "VectorObservationSize";
+ const string k_NumVecObsPropName = "NumStackedVectorObservations";
+
+        /// <inheritdoc />
+ public override float GetPropertyHeight(SerializedProperty property, GUIContent label)
+ {
+ return GetHeightDrawVectorObservation() +
+ GetHeightDrawVectorAction(property);
+ }
+
+        /// <inheritdoc />
+ public override void OnGUI(Rect position, SerializedProperty property, GUIContent label)
+ {
+ var indent = EditorGUI.indentLevel;
+ EditorGUI.indentLevel = 0;
+ position.height = k_LineHeight;
+ EditorGUI.BeginProperty(position, label, property);
+ EditorGUI.indentLevel++;
+
+ // Vector Observations
+ DrawVectorObservation(position, property);
+ position.y += GetHeightDrawVectorObservation();
+
+ // Vector Action
+ DrawVectorAction(position, property);
+ position.y += GetHeightDrawVectorAction(property);
+
+ EditorGUI.EndProperty();
+ EditorGUI.indentLevel = indent;
+ }
+
+        /// <summary>
+        /// Draws the Vector Observations for the Brain Parameters
+        /// </summary>
+        /// <param name="position">Rectangle on the screen to use for the property GUI.</param>
+        /// <param name="property">The SerializedProperty of the BrainParameters
+        /// to make the custom GUI for.</param>
+ static void DrawVectorObservation(Rect position, SerializedProperty property)
+ {
+ EditorGUI.LabelField(position, "Vector Observation");
+ position.y += k_LineHeight;
+
+ EditorGUI.indentLevel++;
+ EditorGUI.PropertyField(position,
+ property.FindPropertyRelative(k_VecObsPropName),
+ new GUIContent("Space Size",
+ "Length of state " +
+ "vector for brain (In Continuous state space)." +
+ "Or number of possible values (in Discrete state space)."));
+ position.y += k_LineHeight;
+
+ EditorGUI.PropertyField(position,
+ property.FindPropertyRelative(k_NumVecObsPropName),
+ new GUIContent("Stacked Vectors",
+ "Number of states that will be stacked before " +
+ "being fed to the neural network."));
+ position.y += k_LineHeight;
+ EditorGUI.indentLevel--;
+ }
+
+        /// <summary>
+        /// The Height required to draw the Vector Observations parameters
+        /// </summary>
+        /// <returns>The height of the drawer of the Vector Observations</returns>
+ static float GetHeightDrawVectorObservation()
+ {
+ return k_VecObsNumLine * k_LineHeight;
+ }
+
+        /// <summary>
+        /// Draws the Vector Actions parameters for the Brain Parameters
+        /// </summary>
+        /// <param name="position">Rectangle on the screen to use for the property GUI.</param>
+        /// <param name="property">The SerializedProperty of the BrainParameters
+        /// to make the custom GUI for.</param>
+ static void DrawVectorAction(Rect position, SerializedProperty property)
+ {
+ EditorGUI.LabelField(position, "Actions");
+ position.y += k_LineHeight;
+ EditorGUI.indentLevel++;
+ var actionSpecProperty = property.FindPropertyRelative(k_ActionSpecName);
+ DrawContinuousVectorAction(position, actionSpecProperty);
+ position.y += k_LineHeight;
+ DrawDiscreteVectorAction(position, actionSpecProperty);
+ }
+
+        /// <summary>
+        /// Draws the Continuous Vector Actions parameters for the Brain Parameters
+        /// </summary>
+        /// <param name="position">Rectangle on the screen to use for the property GUI.</param>
+        /// <param name="property">The SerializedProperty of the BrainParameters
+        /// to make the custom GUI for.</param>
+ static void DrawContinuousVectorAction(Rect position, SerializedProperty property)
+ {
+ var continuousActionSize = property.FindPropertyRelative(k_ContinuousActionSizeName);
+ EditorGUI.PropertyField(
+ position,
+ continuousActionSize,
+ new GUIContent("Continuous Actions", "Number of continuous actions."));
+ }
+
+        /// <summary>
+        /// Draws the Discrete Vector Actions parameters for the Brain Parameters
+        /// </summary>
+        /// <param name="position">Rectangle on the screen to use for the property GUI.</param>
+        /// <param name="property">The SerializedProperty of the BrainParameters
+        /// to make the custom GUI for.</param>
+ static void DrawDiscreteVectorAction(Rect position, SerializedProperty property)
+ {
+ var branchSizes = property.FindPropertyRelative(k_DiscreteBranchSizeName);
+ var newSize = EditorGUI.IntField(
+ position, "Discrete Branches", branchSizes.arraySize);
+
+ // This check is here due to:
+ // https://fogbugz.unity3d.com/f/cases/1246524/
+ // If this case has been resolved, please remove this if condition.
+ if (newSize != branchSizes.arraySize)
+ {
+ branchSizes.arraySize = newSize;
+ }
+
+ position.y += k_LineHeight;
+ position.x += 20;
+ position.width -= 20;
+ for (var branchIndex = 0;
+ branchIndex < branchSizes.arraySize;
+ branchIndex++)
+ {
+ var branchActionSize =
+ branchSizes.GetArrayElementAtIndex(branchIndex);
+
+ EditorGUI.PropertyField(
+ position,
+ branchActionSize,
+ new GUIContent("Branch " + branchIndex + " Size",
+ "Number of possible actions for the branch number " + branchIndex + "."));
+ position.y += k_LineHeight;
+ }
+ }
+
+        /// <summary>
+        /// The Height required to draw the Vector Action parameters.
+        /// </summary>
+        /// <returns>The height of the drawer of the Vector Action.</returns>
+ static float GetHeightDrawVectorAction(SerializedProperty property)
+ {
+ var actionSpecProperty = property.FindPropertyRelative(k_ActionSpecName);
+ var numActionLines = 3 + actionSpecProperty.FindPropertyRelative(k_DiscreteBranchSizeName).arraySize;
+ return numActionLines * k_LineHeight;
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/BrainParametersDrawer.cs.meta b/com.unity.ml-agents/Editor/BrainParametersDrawer.cs.meta
new file mode 100644
index 0000000000..9379a5f0eb
--- /dev/null
+++ b/com.unity.ml-agents/Editor/BrainParametersDrawer.cs.meta
@@ -0,0 +1,12 @@
+fileFormatVersion: 2
+guid: b060ae8e687cf49bcae88b24db17bfa6
+timeCreated: 1517291065
+licenseType: Free
+MonoImporter:
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/BufferSensorComponentEditor.cs b/com.unity.ml-agents/Editor/BufferSensorComponentEditor.cs
new file mode 100644
index 0000000000..f41edb2740
--- /dev/null
+++ b/com.unity.ml-agents/Editor/BufferSensorComponentEditor.cs
@@ -0,0 +1,31 @@
+using UnityEditor;
+using Unity.MLAgents.Sensors;
+
+namespace Unity.MLAgents.Editor
+{
+ [CustomEditor(typeof(BufferSensorComponent), editorForChildClasses: true)]
+ [CanEditMultipleObjects]
+ internal class BufferSensorComponentEditor : UnityEditor.Editor
+ {
+ public override void OnInspectorGUI()
+ {
+ var so = serializedObject;
+ so.Update();
+
+ // Drawing the BufferSensorComponent
+
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ // These fields affect the sensor order or observation size,
+ // So can't be changed at runtime.
+ EditorGUILayout.PropertyField(so.FindProperty("m_SensorName"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_ObservableSize"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_MaxNumObservables"), true);
+ }
+ EditorGUI.EndDisabledGroup();
+
+ so.ApplyModifiedProperties();
+ }
+
+ }
+}
diff --git a/com.unity.ml-agents/Editor/BufferSensorComponentEditor.cs.meta b/com.unity.ml-agents/Editor/BufferSensorComponentEditor.cs.meta
new file mode 100644
index 0000000000..62de961c7e
--- /dev/null
+++ b/com.unity.ml-agents/Editor/BufferSensorComponentEditor.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: b042fe65027f94c1eb38a2ee1362d38d
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/CameraSensorComponentEditor.cs b/com.unity.ml-agents/Editor/CameraSensorComponentEditor.cs
new file mode 100644
index 0000000000..1df66ee3c9
--- /dev/null
+++ b/com.unity.ml-agents/Editor/CameraSensorComponentEditor.cs
@@ -0,0 +1,49 @@
+using UnityEditor;
+using Unity.MLAgents.Sensors;
+
+namespace Unity.MLAgents.Editor
+{
+ [CustomEditor(typeof(CameraSensorComponent), editorForChildClasses: true)]
+ [CanEditMultipleObjects]
+ internal class CameraSensorComponentEditor : UnityEditor.Editor
+ {
+ public override void OnInspectorGUI()
+ {
+ var so = serializedObject;
+ so.Update();
+
+ // Drawing the CameraSensorComponent
+ EditorGUI.BeginChangeCheck();
+
+ EditorGUILayout.PropertyField(so.FindProperty("m_Camera"), true);
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ // These fields affect the sensor order or observation size,
+ // So can't be changed at runtime.
+ EditorGUILayout.PropertyField(so.FindProperty("m_SensorName"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_Width"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_Height"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_Grayscale"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_ObservationStacks"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_ObservationType"), true);
+ }
+ EditorGUI.EndDisabledGroup();
+ EditorGUILayout.PropertyField(so.FindProperty("m_RuntimeCameraEnable"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_Compression"), true);
+
+ var requireSensorUpdate = EditorGUI.EndChangeCheck();
+ so.ApplyModifiedProperties();
+
+ if (requireSensorUpdate)
+ {
+ UpdateSensor();
+ }
+ }
+
+ void UpdateSensor()
+ {
+ var sensorComponent = serializedObject.targetObject as CameraSensorComponent;
+ sensorComponent?.UpdateSensor();
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/CameraSensorComponentEditor.cs.meta b/com.unity.ml-agents/Editor/CameraSensorComponentEditor.cs.meta
new file mode 100644
index 0000000000..70b1e31432
--- /dev/null
+++ b/com.unity.ml-agents/Editor/CameraSensorComponentEditor.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: fdda773c024894cf0ae47d1b1396c38d
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/DemonstrationDrawer.cs b/com.unity.ml-agents/Editor/DemonstrationDrawer.cs
new file mode 100644
index 0000000000..d15baa9a1d
--- /dev/null
+++ b/com.unity.ml-agents/Editor/DemonstrationDrawer.cs
@@ -0,0 +1,137 @@
+using System.Collections.Generic;
+using System.Text;
+using UnityEditor;
+using Unity.MLAgents.Demonstrations;
+
+namespace Unity.MLAgents.Editor
+{
+    /// <summary>
+    /// Renders a custom UI for DemonstrationSummary ScriptableObject.
+    /// </summary>
+ [CustomEditor(typeof(DemonstrationSummary))]
+ [CanEditMultipleObjects]
+ internal class DemonstrationEditor : UnityEditor.Editor
+ {
+ SerializedProperty m_BrainParameters;
+ SerializedProperty m_DemoMetaData;
+ SerializedProperty m_ObservationShapes;
+ const string k_BrainParametersName = "brainParameters";
+ const string k_MetaDataName = "metaData";
+ const string k_ObservationSummariesName = "observationSummaries";
+ const string k_DemonstrationName = "demonstrationName";
+ const string k_NumberStepsName = "numberSteps";
+ const string k_NumberEpisodesName = "numberEpisodes";
+ const string k_MeanRewardName = "meanReward";
+ const string k_ActionSpecName = "m_ActionSpec";
+ const string k_NumContinuousActionsName = "m_NumContinuousActions";
+ const string k_NumDiscreteActionsName = "BranchSizes";
+ const string k_ShapeName = "shape";
+
+
+ void OnEnable()
+ {
+ m_BrainParameters = serializedObject.FindProperty(k_BrainParametersName);
+ m_DemoMetaData = serializedObject.FindProperty(k_MetaDataName);
+ m_ObservationShapes = serializedObject.FindProperty(k_ObservationSummariesName);
+ }
+
+        /// <summary>
+        /// Renders Inspector UI for Demonstration metadata.
+        /// </summary>
+ void MakeMetaDataProperty(SerializedProperty property)
+ {
+ var nameProp = property.FindPropertyRelative(k_DemonstrationName);
+ var experiencesProp = property.FindPropertyRelative(k_NumberStepsName);
+ var episodesProp = property.FindPropertyRelative(k_NumberEpisodesName);
+ var rewardsProp = property.FindPropertyRelative(k_MeanRewardName);
+
+ var nameLabel = nameProp.displayName + ": " + nameProp.stringValue;
+ var experiencesLabel = experiencesProp.displayName + ": " + experiencesProp.intValue;
+ var episodesLabel = episodesProp.displayName + ": " + episodesProp.intValue;
+ var rewardsLabel = rewardsProp.displayName + ": " + rewardsProp.floatValue;
+
+ EditorGUILayout.LabelField(nameLabel);
+ EditorGUILayout.LabelField(experiencesLabel);
+ EditorGUILayout.LabelField(episodesLabel);
+ EditorGUILayout.LabelField(rewardsLabel);
+ }
+
+        /// <summary>
+        /// Constructs label for a serialized integer array.
+        /// </summary>
+ static string BuildIntArrayLabel(SerializedProperty actionSizeProperty)
+ {
+ var actionSize = actionSizeProperty.arraySize;
+ var actionLabel = new StringBuilder("[ ");
+ for (var i = 0; i < actionSize; i++)
+ {
+ actionLabel.Append(actionSizeProperty.GetArrayElementAtIndex(i).intValue);
+ if (i < actionSize - 1)
+ {
+ actionLabel.Append(", ");
+ }
+ }
+
+ actionLabel.Append(" ]");
+ return actionLabel.ToString();
+ }
+
+        /// <summary>
+        /// Renders Inspector UI for BrainParameters of a DemonstrationSummary.
+        /// Only the Action size and type are used from the BrainParameters.
+        /// </summary>
+ void MakeActionsProperty(SerializedProperty property)
+ {
+ var actSpecProperty = property.FindPropertyRelative(k_ActionSpecName);
+ var continuousSizeProperty = actSpecProperty.FindPropertyRelative(k_NumContinuousActionsName);
+ var discreteSizeProperty = actSpecProperty.FindPropertyRelative(k_NumDiscreteActionsName);
+ var continuousSizeLabel = "Continuous Actions: " + continuousSizeProperty.intValue;
+ var discreteSizeLabel = "Discrete Action Branches: ";
+ discreteSizeLabel += discreteSizeProperty == null ? "[]" : BuildIntArrayLabel(discreteSizeProperty);
+ EditorGUILayout.LabelField(continuousSizeLabel);
+ EditorGUILayout.LabelField(discreteSizeLabel);
+ }
+
+        /// <summary>
+        /// Render the observation shapes of a DemonstrationSummary.
+        /// </summary>
+        /// <param name="obsSummariesProperty"></param>
+ void MakeObservationsProperty(SerializedProperty obsSummariesProperty)
+ {
+            var shapesLabels = new List<string>();
+ var numObservations = obsSummariesProperty.arraySize;
+ for (var i = 0; i < numObservations; i++)
+ {
+ var summary = obsSummariesProperty.GetArrayElementAtIndex(i);
+ var shapeProperty = summary.FindPropertyRelative(k_ShapeName);
+ shapesLabels.Add(BuildIntArrayLabel(shapeProperty));
+ }
+
+ var shapeLabel = $"Shapes: {string.Join(", ", shapesLabels)}";
+ EditorGUILayout.LabelField(shapeLabel);
+
+ }
+
+ public override void OnInspectorGUI()
+ {
+ serializedObject.Update();
+
+ EditorGUILayout.LabelField("Meta Data", EditorStyles.boldLabel);
+ EditorGUI.indentLevel++;
+ MakeMetaDataProperty(m_DemoMetaData);
+ EditorGUI.indentLevel--;
+
+ EditorGUILayout.LabelField("Observations", EditorStyles.boldLabel);
+ EditorGUI.indentLevel++;
+ MakeObservationsProperty(m_ObservationShapes);
+ EditorGUI.indentLevel--;
+
+ EditorGUILayout.LabelField("Actions", EditorStyles.boldLabel);
+ EditorGUI.indentLevel++;
+ MakeActionsProperty(m_BrainParameters);
+ EditorGUI.indentLevel--;
+
+ serializedObject.ApplyModifiedProperties();
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/DemonstrationDrawer.cs.meta b/com.unity.ml-agents/Editor/DemonstrationDrawer.cs.meta
new file mode 100644
index 0000000000..57c0681302
--- /dev/null
+++ b/com.unity.ml-agents/Editor/DemonstrationDrawer.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 84f9cd83f56c74790a51444a6cfe4945
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/DemonstrationImporter.cs b/com.unity.ml-agents/Editor/DemonstrationImporter.cs
new file mode 100644
index 0000000000..767f5556f9
--- /dev/null
+++ b/com.unity.ml-agents/Editor/DemonstrationImporter.cs
@@ -0,0 +1,75 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using Unity.MLAgents.CommunicatorObjects;
+using UnityEditor;
+using UnityEngine;
+#if UNITY_2020_2_OR_NEWER
+using UnityEditor.AssetImporters;
+#else
+using UnityEditor.Experimental.AssetImporters;
+#endif
+using Unity.MLAgents.Demonstrations;
+
+namespace Unity.MLAgents.Editor
+{
+    /// <summary>
+    /// Asset Importer used to parse demonstration files.
+    /// </summary>
+ [ScriptedImporter(1, new[] { "demo" })]
+ internal class DemonstrationImporter : ScriptedImporter
+ {
+ const string k_IconPath = "Packages/com.unity.ml-agents/Editor/Icons/DemoIcon.png";
+
+ public override void OnImportAsset(AssetImportContext ctx)
+ {
+ var inputType = Path.GetExtension(ctx.assetPath);
+ if (inputType == null)
+ {
+ throw new Exception("Demonstration import error.");
+ }
+
+ try
+ {
+ // Read first three proto objects containing metadata, brain parameters, and observations.
+ Stream reader = File.OpenRead(ctx.assetPath);
+
+ var metaDataProto = DemonstrationMetaProto.Parser.ParseDelimitedFrom(reader);
+ var metaData = metaDataProto.ToDemonstrationMetaData();
+
+ reader.Seek(DemonstrationWriter.MetaDataBytes + 1, 0);
+ var brainParamsProto = BrainParametersProto.Parser.ParseDelimitedFrom(reader);
+ var brainParameters = brainParamsProto.ToBrainParameters();
+
+ // Read the first AgentInfoActionPair so that we can get the observation sizes.
+                List<ObservationSummary> observationSummaries;
+ try
+ {
+ var agentInfoActionPairProto = AgentInfoActionPairProto.Parser.ParseDelimitedFrom(reader);
+ observationSummaries = agentInfoActionPairProto.GetObservationSummaries();
+ }
+ catch
+ {
+ // Just in case there weren't any AgentInfoActionPair or they couldn't be read.
+                    observationSummaries = new List<ObservationSummary>();
+ }
+
+ reader.Close();
+
+                var demonstrationSummary = ScriptableObject.CreateInstance<DemonstrationSummary>();
+ demonstrationSummary.Initialize(brainParameters, metaData, observationSummaries);
+ userData = demonstrationSummary.ToString();
+
+ var texture = (Texture2D)
+ AssetDatabase.LoadAssetAtPath(k_IconPath, typeof(Texture2D));
+
+ ctx.AddObjectToAsset(ctx.assetPath, demonstrationSummary, texture);
+ ctx.SetMainObject(demonstrationSummary);
+ }
+ catch
+ {
+ // ignored
+ }
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/DemonstrationImporter.cs.meta b/com.unity.ml-agents/Editor/DemonstrationImporter.cs.meta
new file mode 100644
index 0000000000..bbdca977a3
--- /dev/null
+++ b/com.unity.ml-agents/Editor/DemonstrationImporter.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 7bd65ce151aaa4a41a45312543c56be1
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/EditorUtilities.cs b/com.unity.ml-agents/Editor/EditorUtilities.cs
new file mode 100644
index 0000000000..8ef266f259
--- /dev/null
+++ b/com.unity.ml-agents/Editor/EditorUtilities.cs
@@ -0,0 +1,19 @@
+using UnityEngine;
+
+namespace Unity.MLAgents.Editor
+{
+    /// <summary>
+    /// A static helper class for the Editor components of the ML-Agents SDK.
+    /// </summary>
+ public static class EditorUtilities
+ {
+        /// <summary>
+        /// Whether or not properties that affect the model can be updated at the current time.
+        /// </summary>
+        /// <returns></returns>
+ public static bool CanUpdateModelProperties()
+ {
+ return !Application.isPlaying;
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/EditorUtilities.cs.meta b/com.unity.ml-agents/Editor/EditorUtilities.cs.meta
new file mode 100644
index 0000000000..2e58d7e8a3
--- /dev/null
+++ b/com.unity.ml-agents/Editor/EditorUtilities.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 840f5a76642c24b789ee312f0aa8e33b
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/GridSensorComponentEditor.cs b/com.unity.ml-agents/Editor/GridSensorComponentEditor.cs
new file mode 100644
index 0000000000..fa9208e274
--- /dev/null
+++ b/com.unity.ml-agents/Editor/GridSensorComponentEditor.cs
@@ -0,0 +1,109 @@
+using UnityEditor;
+using UnityEngine;
+using Unity.MLAgents.Sensors;
+
+namespace Unity.MLAgents.Editor
+{
+ [CustomEditor(typeof(GridSensorComponent), editorForChildClasses: true)]
+ [CanEditMultipleObjects]
+ internal class GridSensorComponentEditor : UnityEditor.Editor
+ {
+ public override void OnInspectorGUI()
+ {
+#if !MLA_UNITY_PHYSICS_MODULE
+ EditorGUILayout.HelpBox("The Physics Module is not currently present. " +
+ "Please add it to your project in order to use the GridSensor APIs in the " +
+ $"{nameof(GridSensorComponent)}", MessageType.Warning);
+#endif
+
+ var so = serializedObject;
+ so.Update();
+
+ // Drawing the GridSensorComponent
+ EditorGUI.BeginChangeCheck();
+
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ // These fields affect the sensor order or observation size,
+ // So can't be changed at runtime.
+ EditorGUILayout.PropertyField(so.FindProperty(nameof(GridSensorComponent.m_SensorName)), true);
+
+ EditorGUILayout.LabelField("Grid Settings", EditorStyles.boldLabel);
+ EditorGUILayout.PropertyField(so.FindProperty(nameof(GridSensorComponent.m_CellScale)), true);
+                // We only support 2D GridSensors for now, so lock gridSize.y to 1
+ var gridSize = so.FindProperty(nameof(GridSensorComponent.m_GridSize));
+ var gridSize2d = new Vector3Int(gridSize.vector3IntValue.x, 1, gridSize.vector3IntValue.z);
+ var newGridSize = EditorGUILayout.Vector3IntField("Grid Size", gridSize2d);
+ gridSize.vector3IntValue = new Vector3Int(newGridSize.x, 1, newGridSize.z);
+ }
+ EditorGUI.EndDisabledGroup();
+ EditorGUILayout.PropertyField(so.FindProperty(nameof(GridSensorComponent.m_AgentGameObject)), true);
+ EditorGUILayout.PropertyField(so.FindProperty(nameof(GridSensorComponent.m_RotateWithAgent)), true);
+
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ // detectable tags
+ var detectableTags = so.FindProperty(nameof(GridSensorComponent.m_DetectableTags));
+ var newSize = EditorGUILayout.IntField("Detectable Tags", detectableTags.arraySize);
+ if (newSize != detectableTags.arraySize)
+ {
+ detectableTags.arraySize = newSize;
+ }
+ EditorGUI.indentLevel++;
+ for (var i = 0; i < detectableTags.arraySize; i++)
+ {
+ var objectTag = detectableTags.GetArrayElementAtIndex(i);
+ EditorGUILayout.PropertyField(objectTag, new GUIContent("Tag " + i), true);
+ }
+ EditorGUI.indentLevel--;
+ }
+ EditorGUI.EndDisabledGroup();
+ EditorGUILayout.PropertyField(so.FindProperty(nameof(GridSensorComponent.m_ColliderMask)), true);
+ EditorGUILayout.LabelField("Sensor Settings", EditorStyles.boldLabel);
+ EditorGUILayout.PropertyField(so.FindProperty(nameof(GridSensorComponent.m_ObservationStacks)), true);
+ EditorGUI.EndDisabledGroup();
+ EditorGUILayout.PropertyField(so.FindProperty(nameof(GridSensorComponent.m_CompressionType)), true);
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ EditorGUILayout.LabelField("Collider and Buffer", EditorStyles.boldLabel);
+ EditorGUILayout.PropertyField(so.FindProperty(nameof(GridSensorComponent.m_InitialColliderBufferSize)), true);
+ EditorGUILayout.PropertyField(so.FindProperty(nameof(GridSensorComponent.m_MaxColliderBufferSize)), true);
+ }
+ EditorGUI.EndDisabledGroup();
+
+ EditorGUILayout.LabelField("Debug Gizmo", EditorStyles.boldLabel);
+ EditorGUILayout.PropertyField(so.FindProperty(nameof(GridSensorComponent.m_ShowGizmos)), true);
+ EditorGUILayout.PropertyField(so.FindProperty(nameof(GridSensorComponent.m_GizmoYOffset)), true);
+
+ // detectable objects
+ var debugColors = so.FindProperty(nameof(GridSensorComponent.m_DebugColors));
+ var detectableObjectSize = so.FindProperty(nameof(GridSensorComponent.m_DetectableTags)).arraySize;
+ if (detectableObjectSize != debugColors.arraySize)
+ {
+ debugColors.arraySize = detectableObjectSize;
+ }
+ EditorGUILayout.LabelField("Debug Colors");
+ EditorGUI.indentLevel++;
+ for (var i = 0; i < debugColors.arraySize; i++)
+ {
+ var debugColor = debugColors.GetArrayElementAtIndex(i);
+ EditorGUILayout.PropertyField(debugColor, new GUIContent("Tag " + i + " Color"), true);
+ }
+ EditorGUI.indentLevel--;
+
+ var requireSensorUpdate = EditorGUI.EndChangeCheck();
+ so.ApplyModifiedProperties();
+
+ if (requireSensorUpdate)
+ {
+ UpdateSensor();
+ }
+ }
+
+ void UpdateSensor()
+ {
+ var sensorComponent = serializedObject.targetObject as GridSensorComponent;
+ sensorComponent?.UpdateSensor();
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/GridSensorComponentEditor.cs.meta b/com.unity.ml-agents/Editor/GridSensorComponentEditor.cs.meta
new file mode 100644
index 0000000000..c27459abce
--- /dev/null
+++ b/com.unity.ml-agents/Editor/GridSensorComponentEditor.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 584686b36fcb2435c8be47d70c332ed0
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/Icons.meta b/com.unity.ml-agents/Editor/Icons.meta
new file mode 100644
index 0000000000..6071205cd3
--- /dev/null
+++ b/com.unity.ml-agents/Editor/Icons.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: e6f6d464e3884bf883137660dee8aebf
+timeCreated: 1581721596
\ No newline at end of file
diff --git a/com.unity.ml-agents/Editor/Icons/DemoIcon.png b/com.unity.ml-agents/Editor/Icons/DemoIcon.png
new file mode 100644
index 0000000000..ddc91181bf
Binary files /dev/null and b/com.unity.ml-agents/Editor/Icons/DemoIcon.png differ
diff --git a/com.unity.ml-agents/Editor/Icons/DemoIcon.png.meta b/com.unity.ml-agents/Editor/Icons/DemoIcon.png.meta
new file mode 100644
index 0000000000..37831fb256
--- /dev/null
+++ b/com.unity.ml-agents/Editor/Icons/DemoIcon.png.meta
@@ -0,0 +1,86 @@
+fileFormatVersion: 2
+guid: 3352a0e8d253b4a4ea3782a6d7e09d9b
+TextureImporter:
+ fileIDToRecycleName: {}
+ externalObjects: {}
+ serializedVersion: 4
+ mipmaps:
+ mipMapMode: 0
+ enableMipMap: 1
+ sRGBTexture: 1
+ linearTexture: 0
+ fadeOut: 0
+ borderMipMap: 0
+ mipMapsPreserveCoverage: 0
+ alphaTestReferenceValue: 0.5
+ mipMapFadeDistanceStart: 1
+ mipMapFadeDistanceEnd: 3
+ bumpmap:
+ convertToNormalMap: 0
+ externalNormalMap: 0
+ heightScale: 0.25
+ normalMapFilter: 0
+ isReadable: 0
+ grayScaleToAlpha: 0
+ generateCubemap: 6
+ cubemapConvolution: 0
+ seamlessCubemap: 0
+ textureFormat: 1
+ maxTextureSize: 2048
+ textureSettings:
+ serializedVersion: 2
+ filterMode: -1
+ aniso: -1
+ mipBias: -1
+ wrapU: -1
+ wrapV: -1
+ wrapW: -1
+ nPOTScale: 1
+ lightmap: 0
+ compressionQuality: 50
+ spriteMode: 0
+ spriteExtrude: 1
+ spriteMeshType: 1
+ alignment: 0
+ spritePivot: {x: 0.5, y: 0.5}
+ spritePixelsToUnits: 100
+ spriteBorder: {x: 0, y: 0, z: 0, w: 0}
+ spriteGenerateFallbackPhysicsShape: 1
+ alphaUsage: 1
+ alphaIsTransparency: 1
+ spriteTessellationDetail: -1
+ textureType: 0
+ textureShape: 1
+ maxTextureSizeSet: 0
+ compressionQualitySet: 0
+ textureFormatSet: 0
+ platformSettings:
+ - buildTarget: DefaultTexturePlatform
+ maxTextureSize: 2048
+ resizeAlgorithm: 0
+ textureFormat: -1
+ textureCompression: 1
+ compressionQuality: 50
+ crunchedCompression: 0
+ allowsAlphaSplitting: 0
+ overridden: 0
+ androidETC2FallbackOverride: 0
+ - buildTarget: Standalone
+ maxTextureSize: 2048
+ resizeAlgorithm: 0
+ textureFormat: -1
+ textureCompression: 1
+ compressionQuality: 50
+ crunchedCompression: 0
+ allowsAlphaSplitting: 0
+ overridden: 0
+ androidETC2FallbackOverride: 0
+ spriteSheet:
+ serializedVersion: 2
+ sprites: []
+ outline: []
+ physicsShape: []
+ spritePackingTag:
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/MLAgentsSettingsBuildProvider.cs b/com.unity.ml-agents/Editor/MLAgentsSettingsBuildProvider.cs
new file mode 100644
index 0000000000..a91c07ecd7
--- /dev/null
+++ b/com.unity.ml-agents/Editor/MLAgentsSettingsBuildProvider.cs
@@ -0,0 +1,69 @@
+using System.Linq;
+using UnityEngine;
+using UnityEditor;
+using UnityEditor.Build;
+using UnityEditor.Build.Reporting;
+
+
+namespace Unity.MLAgents.Editor
+{
+ internal class MLAgentsSettingsBuildProvider : IPreprocessBuildWithReport, IPostprocessBuildWithReport
+ {
+ private MLAgentsSettings m_SettingsAddedToPreloadedAssets;
+
+ public int callbackOrder => 0;
+
+ public void OnPreprocessBuild(BuildReport report)
+ {
+ var wasDirty = IsPlayerSettingsDirty();
+ m_SettingsAddedToPreloadedAssets = null;
+
+ var preloadedAssets = PlayerSettings.GetPreloadedAssets().ToList();
+ if (!preloadedAssets.Contains(MLAgentsSettingsManager.Settings))
+ {
+ m_SettingsAddedToPreloadedAssets = MLAgentsSettingsManager.Settings;
+ preloadedAssets.Add(m_SettingsAddedToPreloadedAssets);
+ PlayerSettings.SetPreloadedAssets(preloadedAssets.ToArray());
+ }
+
+ if (!wasDirty)
+ ClearPlayerSettingsDirtyFlag();
+ }
+
+ public void OnPostprocessBuild(BuildReport report)
+ {
+ if (m_SettingsAddedToPreloadedAssets == null)
+ return;
+
+ var wasDirty = IsPlayerSettingsDirty();
+
+ var preloadedAssets = PlayerSettings.GetPreloadedAssets().ToList();
+ if (preloadedAssets.Contains(m_SettingsAddedToPreloadedAssets))
+ {
+ preloadedAssets.Remove(m_SettingsAddedToPreloadedAssets);
+ PlayerSettings.SetPreloadedAssets(preloadedAssets.ToArray());
+ }
+
+ m_SettingsAddedToPreloadedAssets = null;
+
+ if (!wasDirty)
+ ClearPlayerSettingsDirtyFlag();
+ }
+
+
+ private static bool IsPlayerSettingsDirty()
+ {
+            var settings = Resources.FindObjectsOfTypeAll<PlayerSettings>();
+ if (settings != null && settings.Length > 0)
+ return EditorUtility.IsDirty(settings[0]);
+ return false;
+ }
+
+ private static void ClearPlayerSettingsDirtyFlag()
+ {
+            var settings = Resources.FindObjectsOfTypeAll<PlayerSettings>();
+ if (settings != null && settings.Length > 0)
+ EditorUtility.ClearDirty(settings[0]);
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/MLAgentsSettingsBuildProvider.cs.meta b/com.unity.ml-agents/Editor/MLAgentsSettingsBuildProvider.cs.meta
new file mode 100644
index 0000000000..214ea9863f
--- /dev/null
+++ b/com.unity.ml-agents/Editor/MLAgentsSettingsBuildProvider.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: bd59ff34305fa4259a2735e08afdb424
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/MLAgentsSettingsProvider.cs b/com.unity.ml-agents/Editor/MLAgentsSettingsProvider.cs
new file mode 100644
index 0000000000..4077927a4e
--- /dev/null
+++ b/com.unity.ml-agents/Editor/MLAgentsSettingsProvider.cs
@@ -0,0 +1,194 @@
+using System;
+using System.Linq;
+using System.IO;
+using System.Runtime.CompilerServices;
+using UnityEngine;
+using UnityEditor;
+using UnityEngine.UIElements;
+
+[assembly: InternalsVisibleTo("Unity.ML-Agents.DevTests.Editor")]
+namespace Unity.MLAgents.Editor
+{
+ internal class MLAgentsSettingsProvider : SettingsProvider, IDisposable
+ {
+ const string k_SettingsPath = "Project/ML-Agents";
+ private static MLAgentsSettingsProvider s_Instance;
+ private string[] m_AvailableSettingsAssets;
+ private int m_CurrentSelectedSettingsAsset;
+ private SerializedObject m_SettingsObject;
+ [SerializeField]
+ private MLAgentsSettings m_Settings;
+
+
+ private MLAgentsSettingsProvider(string path, SettingsScope scope = SettingsScope.Project)
+ : base(path, scope)
+ {
+ s_Instance = this;
+ }
+
+ [SettingsProvider]
+ public static SettingsProvider CreateMLAgentsSettingsProvider()
+ {
+ return new MLAgentsSettingsProvider(k_SettingsPath, SettingsScope.Project);
+ }
+
+ public override void OnActivate(string searchContext, VisualElement rootElement)
+ {
+ base.OnActivate(searchContext, rootElement);
+ MLAgentsSettingsManager.OnSettingsChange += Reinitialize;
+ }
+
+ public override void OnDeactivate()
+ {
+ base.OnDeactivate();
+ MLAgentsSettingsManager.OnSettingsChange -= Reinitialize;
+ }
+
+ public void Dispose()
+ {
+ m_SettingsObject?.Dispose();
+ }
+
+ public override void OnTitleBarGUI()
+ {
+ if (EditorGUILayout.DropdownButton(EditorGUIUtility.IconContent("_Popup"), FocusType.Passive, EditorStyles.label))
+ {
+ var menu = new GenericMenu();
+ for (var i = 0; i < m_AvailableSettingsAssets.Length; i++)
+ {
+ menu.AddItem(ExtractDisplayName(m_AvailableSettingsAssets[i]), m_CurrentSelectedSettingsAsset == i, (path) =>
+ {
+                        MLAgentsSettingsManager.Settings = AssetDatabase.LoadAssetAtPath<MLAgentsSettings>((string)path);
+ }, m_AvailableSettingsAssets[i]);
+ }
+ menu.AddSeparator("");
+ menu.AddItem(new GUIContent("New Settings Asset…"), false, CreateNewSettingsAsset);
+ menu.ShowAsContext();
+ Event.current.Use();
+ }
+ }
+
+ private GUIContent ExtractDisplayName(string name)
+ {
+ if (name.StartsWith("Assets/"))
+ name = name.Substring("Assets/".Length);
+ if (name.EndsWith(".asset"))
+ name = name.Substring(0, name.Length - ".asset".Length);
+ if (name.EndsWith(".mlagents.settings"))
+ name = name.Substring(0, name.Length - ".mlagents.settings".Length);
+
+ // Ugly hack: GenericMenu interprets "/" as a submenu path. But luckily, "/" is not the only slash we have in Unicode.
+ return new GUIContent(name.Replace("/", "\u29f8"));
+ }
+
+ private void CreateNewSettingsAsset()
+ {
+ // Asset database always use forward slashes. Use forward slashes for all the paths.
+ var projectName = PlayerSettings.productName;
+ var path = EditorUtility.SaveFilePanel("Create ML-Agents Settings File", "Assets",
+ projectName + ".mlagents.settings", "asset");
+ if (string.IsNullOrEmpty(path))
+ {
+ return;
+ }
+
+ path = path.Replace("\\", "/"); // Make sure we only get '/' separators.
+ var assetPath = Application.dataPath + "/";
+ if (!path.StartsWith(assetPath, StringComparison.CurrentCultureIgnoreCase))
+ {
+ Debug.LogError(string.Format(
+ "Settings must be stored in Assets folder of the project (got: '{0}')", path));
+ return;
+ }
+
+ var extension = Path.GetExtension(path);
+ if (string.Compare(extension, ".asset", StringComparison.InvariantCultureIgnoreCase) != 0)
+ {
+ path += ".asset";
+ }
+ var relativePath = "Assets/" + path.Substring(assetPath.Length);
+ CreateNewSettingsAsset(relativePath);
+ }
+
+ private static void CreateNewSettingsAsset(string relativePath)
+ {
+            var settings = ScriptableObject.CreateInstance<MLAgentsSettings>();
+ AssetDatabase.CreateAsset(settings, relativePath);
+ EditorGUIUtility.PingObject(settings);
+ // Install the settings. This will lead to an MLAgentsManager.OnSettingsChange event
+ // which in turn will cause this Provider to reinitialize
+ MLAgentsSettingsManager.Settings = settings;
+ }
+
+ public override void OnGUI(string searchContext)
+ {
+ if (m_Settings == null)
+ {
+ InitializeWithCurrentSettings();
+ }
+
+ if (m_AvailableSettingsAssets.Length == 0)
+ {
+ EditorGUILayout.HelpBox(
+ "Click the button below to create a settings asset you can edit.",
+ MessageType.Info);
+ if (GUILayout.Button("Create settings asset", GUILayout.Height(30)))
+ CreateNewSettingsAsset();
+ GUILayout.Space(20);
+ }
+
+ using (new EditorGUI.DisabledScope(m_AvailableSettingsAssets.Length == 0))
+ {
+ EditorGUI.BeginChangeCheck();
+ EditorGUILayout.LabelField("Trainer Settings", EditorStyles.boldLabel);
+ EditorGUI.indentLevel++;
+ EditorGUILayout.PropertyField(m_SettingsObject.FindProperty("m_ConnectTrainer"), new GUIContent("Connect to Trainer"));
+ EditorGUILayout.PropertyField(m_SettingsObject.FindProperty("m_EditorPort"), new GUIContent("Editor Training Port"));
+ EditorGUI.indentLevel--;
+ if (EditorGUI.EndChangeCheck())
+ m_SettingsObject.ApplyModifiedProperties();
+ }
+ }
+
+ internal void InitializeWithCurrentSettings()
+ {
+ m_AvailableSettingsAssets = FindSettingsInProject();
+
+ m_Settings = MLAgentsSettingsManager.Settings;
+ var currentSettingsPath = AssetDatabase.GetAssetPath(m_Settings);
+ if (string.IsNullOrEmpty(currentSettingsPath))
+ {
+ if (m_AvailableSettingsAssets.Length > 0)
+ {
+ m_CurrentSelectedSettingsAsset = 0;
+                    m_Settings = AssetDatabase.LoadAssetAtPath<MLAgentsSettings>(m_AvailableSettingsAssets[0]);
+ MLAgentsSettingsManager.Settings = m_Settings;
+ }
+ }
+ else
+ {
+ var settingsList = m_AvailableSettingsAssets.ToList();
+ m_CurrentSelectedSettingsAsset = settingsList.IndexOf(currentSettingsPath);
+
+ EditorBuildSettings.AddConfigObject(MLAgentsSettingsManager.EditorBuildSettingsConfigKey, m_Settings, true);
+ }
+
+ m_SettingsObject = new SerializedObject(m_Settings);
+ }
+
+ private static string[] FindSettingsInProject()
+ {
+ var guids = AssetDatabase.FindAssets("t:MLAgentsSettings");
+ return guids.Select(guid => AssetDatabase.GUIDToAssetPath(guid)).ToArray();
+ }
+
+ private void Reinitialize()
+ {
+ if (m_Settings != null && MLAgentsSettingsManager.Settings != m_Settings)
+ {
+ InitializeWithCurrentSettings();
+ }
+ Repaint();
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/MLAgentsSettingsProvider.cs.meta b/com.unity.ml-agents/Editor/MLAgentsSettingsProvider.cs.meta
new file mode 100644
index 0000000000..09eaa72b4e
--- /dev/null
+++ b/com.unity.ml-agents/Editor/MLAgentsSettingsProvider.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 162489862d7f64a40990a0c06bb73bd0
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/Match3ActuatorComponentEditor.cs b/com.unity.ml-agents/Editor/Match3ActuatorComponentEditor.cs
new file mode 100644
index 0000000000..0b072c5b52
--- /dev/null
+++ b/com.unity.ml-agents/Editor/Match3ActuatorComponentEditor.cs
@@ -0,0 +1,46 @@
+using UnityEditor;
+using Unity.MLAgents.Integrations.Match3;
+namespace Unity.MLAgents.Editor
+{
+ [CustomEditor(typeof(Match3ActuatorComponent), editorForChildClasses: true)]
+ [CanEditMultipleObjects]
+ internal class Match3ActuatorComponentEditor : UnityEditor.Editor
+ {
+ public override void OnInspectorGUI()
+ {
+ var so = serializedObject;
+ so.Update();
+
+ var component = (Match3ActuatorComponent)target;
+ var board = component.GetComponent();
+ if (board == null)
+ {
+ EditorGUILayout.HelpBox("You must provide an implementation of an AbstractBoard.", MessageType.Warning);
+ return;
+ }
+
+            // Drawing the Match3ActuatorComponent
+ EditorGUI.BeginChangeCheck();
+
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ EditorGUILayout.PropertyField(so.FindProperty("m_ActuatorName"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_RandomSeed"), true);
+ }
+ EditorGUI.EndDisabledGroup();
+ EditorGUILayout.PropertyField(so.FindProperty("m_ForceHeuristic"), true);
+
+ var requireSensorUpdate = EditorGUI.EndChangeCheck();
+ so.ApplyModifiedProperties();
+
+ if (requireSensorUpdate)
+ {
+ UpdateActuator();
+ }
+ }
+
+ void UpdateActuator()
+ {
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/Match3ActuatorComponentEditor.cs.meta b/com.unity.ml-agents/Editor/Match3ActuatorComponentEditor.cs.meta
new file mode 100644
index 0000000000..ce515a1234
--- /dev/null
+++ b/com.unity.ml-agents/Editor/Match3ActuatorComponentEditor.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: b545474cca77481bbc3c6c161dd6bbc3
+timeCreated: 1618441761
\ No newline at end of file
diff --git a/com.unity.ml-agents/Editor/Match3SensorComponentEditor.cs b/com.unity.ml-agents/Editor/Match3SensorComponentEditor.cs
new file mode 100644
index 0000000000..857bedeee8
--- /dev/null
+++ b/com.unity.ml-agents/Editor/Match3SensorComponentEditor.cs
@@ -0,0 +1,45 @@
+using UnityEditor;
+using Unity.MLAgents.Integrations.Match3;
+namespace Unity.MLAgents.Editor
+{
+ [CustomEditor(typeof(Match3SensorComponent), editorForChildClasses: true)]
+ [CanEditMultipleObjects]
+ internal class Match3SensorComponentEditor : UnityEditor.Editor
+ {
+ public override void OnInspectorGUI()
+ {
+ var so = serializedObject;
+ so.Update();
+
+ var component = (Match3SensorComponent)target;
+            var board = component.GetComponent<AbstractBoard>();
+ if (board == null)
+ {
+ EditorGUILayout.HelpBox("You must provide an implementation of an AbstractBoard.", MessageType.Warning);
+ return;
+ }
+
+            // Drawing the Match3SensorComponent
+ EditorGUI.BeginChangeCheck();
+
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ EditorGUILayout.PropertyField(so.FindProperty("m_SensorName"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_ObservationType"), true);
+ }
+ EditorGUI.EndDisabledGroup();
+
+ var requireSensorUpdate = EditorGUI.EndChangeCheck();
+ so.ApplyModifiedProperties();
+
+ if (requireSensorUpdate)
+ {
+ UpdateSensor();
+ }
+ }
+
+ void UpdateSensor()
+ {
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/Match3SensorComponentEditor.cs.meta b/com.unity.ml-agents/Editor/Match3SensorComponentEditor.cs.meta
new file mode 100644
index 0000000000..82a80140ed
--- /dev/null
+++ b/com.unity.ml-agents/Editor/Match3SensorComponentEditor.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: ab55bf118d03479bb797c0037989c308
+timeCreated: 1618440499
\ No newline at end of file
diff --git a/com.unity.ml-agents/Editor/RayPerceptionSensorComponentBaseEditor.cs b/com.unity.ml-agents/Editor/RayPerceptionSensorComponentBaseEditor.cs
new file mode 100644
index 0000000000..4231c2776e
--- /dev/null
+++ b/com.unity.ml-agents/Editor/RayPerceptionSensorComponentBaseEditor.cs
@@ -0,0 +1,111 @@
+using UnityEngine;
+using UnityEditor;
+using Unity.MLAgents.Sensors;
+
+namespace Unity.MLAgents.Editor
+{
+ internal class RayPerceptionSensorComponentBaseEditor : UnityEditor.Editor
+ {
+ bool m_RequireSensorUpdate;
+
+ protected void OnRayPerceptionInspectorGUI(bool is3d)
+ {
+#if !MLA_UNITY_PHYSICS_MODULE
+ if (is3d)
+ {
+ EditorGUILayout.HelpBox("The Physics Module is not currently present. " +
+ "Please add it to your project in order to use the Ray Perception APIs in the " +
+ $"{nameof(RayPerceptionSensorComponent3D)}", MessageType.Warning);
+ }
+#endif
+#if !MLA_UNITY_PHYSICS2D_MODULE
+ if (!is3d)
+ {
+ EditorGUILayout.HelpBox("The Physics2D Module is not currently present. " +
+ "Please add it to your project in order to use the Ray Perception APIs in the " +
+                    $"{nameof(RayPerceptionSensorComponent2D)}", MessageType.Warning);
+ }
+#endif
+ var so = serializedObject;
+ so.Update();
+
+ // Drawing the RayPerceptionSensorComponent
+ EditorGUI.BeginChangeCheck();
+ EditorGUI.indentLevel++;
+
+ // Don't allow certain fields to be modified during play mode.
+ // * SensorName affects the ordering of the Agent's observations
+ // * The number of tags and rays affects the size of the observations.
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ EditorGUILayout.PropertyField(so.FindProperty("m_SensorName"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_DetectableTags"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_RaysPerDirection"), true);
+ }
+ EditorGUI.EndDisabledGroup();
+
+ EditorGUILayout.PropertyField(so.FindProperty("m_MaxRayDegrees"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_SphereCastRadius"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_RayLength"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_RayLayerMask"), true);
+
+ // Because the number of observation stacks affects the observation shape,
+ // it is not editable during play mode.
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ EditorGUILayout.PropertyField(so.FindProperty("m_ObservationStacks"), new GUIContent("Stacked Raycasts"), true);
+ }
+ EditorGUI.EndDisabledGroup();
+
+ if (is3d)
+ {
+ EditorGUILayout.PropertyField(so.FindProperty("m_StartVerticalOffset"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_EndVerticalOffset"), true);
+ }
+
+ EditorGUILayout.PropertyField(so.FindProperty("m_AlternatingRayOrder"), true);
+
+ EditorGUILayout.PropertyField(so.FindProperty("rayHitColor"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("rayMissColor"), true);
+
+ EditorGUI.indentLevel--;
+ if (EditorGUI.EndChangeCheck())
+ {
+ m_RequireSensorUpdate = true;
+ }
+
+ so.ApplyModifiedProperties();
+ UpdateSensorIfDirty();
+ }
+
+ void UpdateSensorIfDirty()
+ {
+ if (m_RequireSensorUpdate)
+ {
+ var sensorComponent = serializedObject.targetObject as RayPerceptionSensorComponentBase;
+ sensorComponent?.UpdateSensor();
+ m_RequireSensorUpdate = false;
+ }
+ }
+ }
+
+ [CustomEditor(typeof(RayPerceptionSensorComponent2D), editorForChildClasses: true)]
+ [CanEditMultipleObjects]
+ internal class RayPerceptionSensorComponent2DEditor : RayPerceptionSensorComponentBaseEditor
+ {
+ public override void OnInspectorGUI()
+ {
+ OnRayPerceptionInspectorGUI(false);
+ }
+ }
+
+ [CustomEditor(typeof(RayPerceptionSensorComponent3D), editorForChildClasses: true)]
+ [CanEditMultipleObjects]
+ internal class RayPerceptionSensorComponent3DEditor : RayPerceptionSensorComponentBaseEditor
+ {
+ public override void OnInspectorGUI()
+ {
+ OnRayPerceptionInspectorGUI(true);
+ }
+ }
+}
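
Several of these editors gate structural fields behind `EditorUtilities.CanUpdateModelProperties()`, whose implementation is not shown in this diff. A plausible sketch of such a guard (an assumption; the package's actual logic may differ):

```csharp
using UnityEditor;

// Sketch of a play-mode guard in the spirit of EditorUtilities.CanUpdateModelProperties().
// This is an assumed implementation, not the package's actual code.
internal static class EditorUtilitiesSketch
{
    public static bool CanUpdateModelProperties()
    {
        // Fields that change the observation shape (sensor names, ray counts,
        // detectable tags) must stay fixed while playing, so editing is only
        // allowed outside play mode.
        return !EditorApplication.isPlayingOrWillChangePlaymode;
    }
}
```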
diff --git a/com.unity.ml-agents/Editor/RayPerceptionSensorComponentBaseEditor.cs.meta b/com.unity.ml-agents/Editor/RayPerceptionSensorComponentBaseEditor.cs.meta
new file mode 100644
index 0000000000..c02a8ca2a9
--- /dev/null
+++ b/com.unity.ml-agents/Editor/RayPerceptionSensorComponentBaseEditor.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: c0182483e53c24d0e9f264f711ed89a9
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/RenderTextureSensorComponentEditor.cs b/com.unity.ml-agents/Editor/RenderTextureSensorComponentEditor.cs
new file mode 100644
index 0000000000..8e5fd892d3
--- /dev/null
+++ b/com.unity.ml-agents/Editor/RenderTextureSensorComponentEditor.cs
@@ -0,0 +1,43 @@
+using UnityEditor;
+using Unity.MLAgents.Sensors;
+namespace Unity.MLAgents.Editor
+{
+ [CustomEditor(typeof(RenderTextureSensorComponent), editorForChildClasses: true)]
+ [CanEditMultipleObjects]
+ internal class RenderTextureSensorComponentEditor : UnityEditor.Editor
+ {
+ public override void OnInspectorGUI()
+ {
+ var so = serializedObject;
+ so.Update();
+
+            // Drawing the RenderTextureSensorComponent
+ EditorGUI.BeginChangeCheck();
+
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ EditorGUILayout.PropertyField(so.FindProperty("m_RenderTexture"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_SensorName"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_Grayscale"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_ObservationStacks"), true);
+ }
+ EditorGUI.EndDisabledGroup();
+
+ EditorGUILayout.PropertyField(so.FindProperty("m_Compression"), true);
+
+ var requireSensorUpdate = EditorGUI.EndChangeCheck();
+ so.ApplyModifiedProperties();
+
+ if (requireSensorUpdate)
+ {
+ UpdateSensor();
+ }
+ }
+
+ void UpdateSensor()
+ {
+ var sensorComponent = serializedObject.targetObject as RenderTextureSensorComponent;
+ sensorComponent?.UpdateSensor();
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/RenderTextureSensorComponentEditor.cs.meta b/com.unity.ml-agents/Editor/RenderTextureSensorComponentEditor.cs.meta
new file mode 100644
index 0000000000..fd7a57d05e
--- /dev/null
+++ b/com.unity.ml-agents/Editor/RenderTextureSensorComponentEditor.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: dab309e01d2964f0792de3ef914ca6b9
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/Unity.ML-Agents.Editor.asmdef b/com.unity.ml-agents/Editor/Unity.ML-Agents.Editor.asmdef
new file mode 100755
index 0000000000..27f67d0d1c
--- /dev/null
+++ b/com.unity.ml-agents/Editor/Unity.ML-Agents.Editor.asmdef
@@ -0,0 +1,30 @@
+{
+ "name": "Unity.ML-Agents.Editor",
+ "references": [
+ "Unity.ML-Agents",
+ "Unity.Barracuda",
+ "Unity.ML-Agents.CommunicatorObjects"
+ ],
+ "optionalUnityReferences": [],
+ "includePlatforms": [
+ "Editor"
+ ],
+ "excludePlatforms": [],
+ "allowUnsafeCode": false,
+ "overrideReferences": false,
+ "precompiledReferences": [],
+ "autoReferenced": true,
+ "defineConstraints": [],
+ "versionDefines": [
+ {
+ "name": "com.unity.modules.physics",
+ "expression": "1.0.0",
+ "define": "MLA_UNITY_PHYSICS_MODULE"
+ },
+ {
+ "name": "com.unity.modules.physics2d",
+ "expression": "1.0.0",
+ "define": "MLA_UNITY_PHYSICS2D_MODULE"
+ }
+ ]
+}
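
The `versionDefines` block above is what backs the `MLA_UNITY_PHYSICS_MODULE` and `MLA_UNITY_PHYSICS2D_MODULE` guards used in `RayPerceptionSensorComponentBaseEditor`: Unity defines each symbol only while the named module package satisfies the version expression, so physics-dependent code compiles out of projects that lack the module. A small sketch of how such a define is consumed (the class and method are illustrative, not package API):

```csharp
#if MLA_UNITY_PHYSICS_MODULE
using UnityEngine;

// Compiled only when com.unity.modules.physics is present in the project.
internal static class PhysicsDependentSketch
{
    internal static bool HitsAnything(Vector3 origin, Vector3 direction, float maxDistance)
    {
        // Physics.Raycast is unavailable without the Physics module, hence the guard.
        return Physics.Raycast(origin, direction, maxDistance);
    }
}
#endif
```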
diff --git a/com.unity.ml-agents/Editor/Unity.ML-Agents.Editor.asmdef.meta b/com.unity.ml-agents/Editor/Unity.ML-Agents.Editor.asmdef.meta
new file mode 100644
index 0000000000..0c031cb370
--- /dev/null
+++ b/com.unity.ml-agents/Editor/Unity.ML-Agents.Editor.asmdef.meta
@@ -0,0 +1,7 @@
+fileFormatVersion: 2
+guid: 42675ddec8c314cf08d17ee0f6f5e5a5
+AssemblyDefinitionImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/UnityColors.colors b/com.unity.ml-agents/Editor/UnityColors.colors
new file mode 100644
index 0000000000..af773637c7
--- /dev/null
+++ b/com.unity.ml-agents/Editor/UnityColors.colors
@@ -0,0 +1,64 @@
+%YAML 1.1
+%TAG !u! tag:unity3d.com,2011:
+--- !u!114 &1
+MonoBehaviour:
+ m_ObjectHideFlags: 52
+ m_PrefabParentObject: {fileID: 0}
+ m_PrefabInternal: {fileID: 0}
+ m_GameObject: {fileID: 0}
+ m_Enabled: 1
+ m_EditorHideFlags: 1
+ m_Script: {fileID: 12323, guid: 0000000000000000e000000000000000, type: 0}
+ m_Name: UnityColors
+ m_EditorClassIdentifier:
+ m_Presets:
+ - m_Name:
+ m_Color: {r: 0.12941177, g: 0.5882353, b: 0.9529412, a: 1}
+ - m_Name:
+ m_Color: {r: 0, g: 0.34117648, b: 0.6039216, a: 1}
+ - m_Name:
+ m_Color: {r: 0.2627451, g: 0.7019608, b: 0.9019608, a: 1}
+ - m_Name:
+ m_Color: {r: 0.92156863, g: 0.25490198, b: 0.47843137, a: 1}
+ - m_Name:
+ m_Color: {r: 0.92941177, g: 0.3254902, b: 0.31764707, a: 1}
+ - m_Name:
+ m_Color: {r: 0.3647059, g: 0.41568628, b: 0.69411767, a: 1}
+ - m_Name:
+ m_Color: {r: 0.46666667, g: 0.5647059, b: 0.60784316, a: 1}
+ - m_Name:
+ m_Color: {r: 0.74509805, g: 0.7372549, b: 0.7411765, a: 1}
+ - m_Name:
+ m_Color: {r: 0.9254902, g: 0.9372549, b: 0.9411765, a: 1}
+ - m_Name:
+ m_Color: {r: 0.6039216, g: 0.31764707, b: 0.627451, a: 1}
+ - m_Name:
+ m_Color: {r: 0.2901961, g: 0.1764706, b: 0.5254902, a: 1}
+ - m_Name:
+ m_Color: {r: 0.4627451, g: 0.35686275, b: 0.654902, a: 1}
+ - m_Name:
+ m_Color: {r: 0.6039216, g: 0.31764707, b: 0.627451, a: 1}
+ - m_Name:
+ m_Color: {r: 0.20392157, g: 0.75686276, b: 0.8392157, a: 1}
+ - m_Name:
+ m_Color: {r: 0.1254902, g: 0.6509804, b: 0.60784316, a: 1}
+ - m_Name:
+ m_Color: {r: 0.39609292, g: 0.49962592, b: 0.6509434, a: 0}
+ - m_Name:
+ m_Color: {r: 0.40392157, g: 0.7372549, b: 0.41960785, a: 1}
+ - m_Name:
+ m_Color: {r: 0.60784316, g: 0.8039216, b: 0.39607844, a: 1}
+ - m_Name:
+ m_Color: {r: 0.8235294, g: 0.8784314, b: 0.34901962, a: 1}
+ - m_Name:
+ m_Color: {r: 1, g: 0.79607844, b: 0.15294118, a: 1}
+ - m_Name:
+ m_Color: {r: 1, g: 0.93333334, b: 0.34509805, a: 1}
+ - m_Name:
+ m_Color: {r: 0.98039216, g: 0.6509804, b: 0.16078432, a: 1}
+ - m_Name:
+ m_Color: {r: 0.9529412, g: 0.4392157, b: 0.27450982, a: 1}
+ - m_Name:
+ m_Color: {r: 0.74509805, g: 0.22745098, b: 0.15294118, a: 1}
+ - m_Name:
+ m_Color: {r: 0.9529412, g: 0.4392157, b: 0.27450982, a: 1}
diff --git a/com.unity.ml-agents/Editor/UnityColors.colors.meta b/com.unity.ml-agents/Editor/UnityColors.colors.meta
new file mode 100644
index 0000000000..34519c21ce
--- /dev/null
+++ b/com.unity.ml-agents/Editor/UnityColors.colors.meta
@@ -0,0 +1,8 @@
+fileFormatVersion: 2
+guid: b20b0226063034686a6cf92ade284285
+NativeFormatImporter:
+ externalObjects: {}
+ mainObjectFileID: 0
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Editor/VectorSensorComponentEditor.cs b/com.unity.ml-agents/Editor/VectorSensorComponentEditor.cs
new file mode 100644
index 0000000000..aae6fd796f
--- /dev/null
+++ b/com.unity.ml-agents/Editor/VectorSensorComponentEditor.cs
@@ -0,0 +1,31 @@
+using UnityEditor;
+using Unity.MLAgents.Sensors;
+
+namespace Unity.MLAgents.Editor
+{
+ [CustomEditor(typeof(VectorSensorComponent), editorForChildClasses: true)]
+ [CanEditMultipleObjects]
+ internal class VectorSensorComponentEditor : UnityEditor.Editor
+ {
+ public override void OnInspectorGUI()
+ {
+ var so = serializedObject;
+ so.Update();
+
+ // Drawing the VectorSensorComponent
+
+ EditorGUI.BeginDisabledGroup(!EditorUtilities.CanUpdateModelProperties());
+ {
+ // These fields affect the sensor order or observation size,
+ // So can't be changed at runtime.
+ EditorGUILayout.PropertyField(so.FindProperty("m_SensorName"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_ObservationSize"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_ObservationType"), true);
+ EditorGUILayout.PropertyField(so.FindProperty("m_ObservationStacks"), true);
+ }
+ EditorGUI.EndDisabledGroup();
+
+ so.ApplyModifiedProperties();
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Editor/VectorSensorComponentEditor.cs.meta b/com.unity.ml-agents/Editor/VectorSensorComponentEditor.cs.meta
new file mode 100644
index 0000000000..9862a23944
--- /dev/null
+++ b/com.unity.ml-agents/Editor/VectorSensorComponentEditor.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: aa0230c3402f04921acdbbdb61f6ff00
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/LICENSE.md b/com.unity.ml-agents/LICENSE.md
new file mode 100644
index 0000000000..42863a2c98
--- /dev/null
+++ b/com.unity.ml-agents/LICENSE.md
@@ -0,0 +1,202 @@
+com.unity.ml-agents copyright © 2017 Unity Technologies
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
diff --git a/com.unity.ml-agents/LICENSE.md.meta b/com.unity.ml-agents/LICENSE.md.meta
new file mode 100644
index 0000000000..0497eff447
--- /dev/null
+++ b/com.unity.ml-agents/LICENSE.md.meta
@@ -0,0 +1,7 @@
+fileFormatVersion: 2
+guid: 3b008ccfd571c4bc08e5ae283e73db3f
+TextScriptImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins.meta b/com.unity.ml-agents/Plugins.meta
new file mode 100644
index 0000000000..e4a9de128a
--- /dev/null
+++ b/com.unity.ml-agents/Plugins.meta
@@ -0,0 +1,8 @@
+fileFormatVersion: 2
+guid: 694794cb53c6c4bfc9b84ca5022f4ae2
+folderAsset: yes
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/IL2CPP.DL.Stubs.c b/com.unity.ml-agents/Plugins/IL2CPP.DL.Stubs.c
new file mode 100644
index 0000000000..843dafb645
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/IL2CPP.DL.Stubs.c
@@ -0,0 +1,10 @@
+// These stubs fix an issue compiling GRPC on Windows with IL2CPP.
+// For the moment, only inference works; training does not.
+
+void * dlopen(const char *filename, int flags) {
+ return 0;
+}
+
+void * dlsym(void *handle, const char *symbol) {
+ return 0;
+}
\ No newline at end of file
diff --git a/com.unity.ml-agents/Plugins/IL2CPP.DL.Stubs.c.meta b/com.unity.ml-agents/Plugins/IL2CPP.DL.Stubs.c.meta
new file mode 100644
index 0000000000..9f2b819fa0
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/IL2CPP.DL.Stubs.c.meta
@@ -0,0 +1,89 @@
+fileFormatVersion: 2
+guid: 3509a8908cf600c4f914a0705123a363
+PluginImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ iconMap: {}
+ executionOrder: {}
+ defineConstraints: []
+ isPreloaded: 1
+ isOverridable: 0
+ isExplicitlyReferenced: 0
+ validateReferences: 1
+ platformData:
+ - first:
+ : Any
+ second:
+ enabled: 0
+ settings:
+ Exclude Editor: 1
+ Exclude Linux: 1
+ Exclude Linux64: 1
+ Exclude LinuxUniversal: 1
+ Exclude OSXUniversal: 1
+ Exclude Win: 0
+ Exclude Win64: 0
+ - first:
+ Any:
+ second:
+ enabled: 0
+ settings: {}
+ - first:
+ Editor: Editor
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ DefaultValueInitialized: true
+ OS: AnyOS
+ - first:
+ Facebook: Win
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Facebook: Win64
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Linux
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ Standalone: Linux64
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ Standalone: LinuxUniversal
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ Standalone: OSXUniversal
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ Standalone: Win
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Win64
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer.meta b/com.unity.ml-agents/Plugins/ProtoBuffer.meta
new file mode 100644
index 0000000000..af0fdcb105
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer.meta
@@ -0,0 +1,10 @@
+fileFormatVersion: 2
+guid: e44343d7e31b04d47bd5f7329c918ffe
+folderAsset: yes
+timeCreated: 1521839636
+licenseType: Free
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/Grpc.Core.dll b/com.unity.ml-agents/Plugins/ProtoBuffer/Grpc.Core.dll
new file mode 100644
index 0000000000..601f87c27a
Binary files /dev/null and b/com.unity.ml-agents/Plugins/ProtoBuffer/Grpc.Core.dll differ
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/Grpc.Core.dll.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/Grpc.Core.dll.meta
new file mode 100644
index 0000000000..1e82ae4a51
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/Grpc.Core.dll.meta
@@ -0,0 +1,118 @@
+fileFormatVersion: 2
+guid: cbf24ddeec4054edc9ad4c8295556878
+PluginImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ iconMap: {}
+ executionOrder: {}
+ defineConstraints: []
+ isPreloaded: 0
+ isOverridable: 0
+ isExplicitlyReferenced: 0
+ validateReferences: 1
+ platformData:
+ - first:
+ : Any
+ second:
+ enabled: 0
+ settings:
+ Exclude Android: 1
+ Exclude CloudRendering: 1
+ Exclude Editor: 0
+ Exclude Linux: 0
+ Exclude Linux64: 0
+ Exclude LinuxUniversal: 0
+ Exclude OSXUniversal: 0
+ Exclude WebGL: 1
+ Exclude Win: 0
+ Exclude Win64: 0
+ Exclude iOS: 1
+ - first:
+ Android: Android
+ second:
+ enabled: 0
+ settings:
+ CPU: ARMv7
+ - first:
+ Any:
+ second:
+ enabled: 0
+ settings: {}
+ - first:
+ CloudRendering: CloudRendering
+ second:
+ enabled: 0
+ settings: {}
+ - first:
+ Editor: Editor
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ DefaultValueInitialized: true
+ OS: AnyOS
+ - first:
+ Facebook: Win
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Facebook: Win64
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Linux
+ second:
+ enabled: 1
+ settings:
+ CPU: x86
+ - first:
+ Standalone: Linux64
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: LinuxUniversal
+ second:
+ enabled: 1
+ settings: {}
+ - first:
+ Standalone: OSXUniversal
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Win
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Win64
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Windows Store Apps: WindowsStoreApps
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ iPhone: iOS
+ second:
+ enabled: 0
+ settings:
+ AddToEmbeddedBinaries: false
+ CPU: AnyCPU
+ CompileFlags:
+ FrameworkDependencies:
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/System.Interactive.Async.dll b/com.unity.ml-agents/Plugins/ProtoBuffer/System.Interactive.Async.dll
new file mode 100755
index 0000000000..48efea419e
Binary files /dev/null and b/com.unity.ml-agents/Plugins/ProtoBuffer/System.Interactive.Async.dll differ
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/System.Interactive.Async.dll.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/System.Interactive.Async.dll.meta
new file mode 100644
index 0000000000..969150b326
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/System.Interactive.Async.dll.meta
@@ -0,0 +1,33 @@
+fileFormatVersion: 2
+guid: 9502ce7e38c5947dba996570732b6e9f
+PluginImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ iconMap: {}
+ executionOrder: {}
+ defineConstraints: []
+ isPreloaded: 0
+ isOverridable: 0
+ isExplicitlyReferenced: 0
+ validateReferences: 1
+ platformData:
+ - first:
+ Any:
+ second:
+ enabled: 1
+ settings: {}
+ - first:
+ Editor: Editor
+ second:
+ enabled: 0
+ settings:
+ DefaultValueInitialized: true
+ - first:
+ Windows Store Apps: WindowsStoreApps
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/link.xml b/com.unity.ml-agents/Plugins/ProtoBuffer/link.xml
new file mode 100644
index 0000000000..857dfdd0a4
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/link.xml
@@ -0,0 +1,10 @@
+
+
+
+
+
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/link.xml.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/link.xml.meta
new file mode 100644
index 0000000000..872460e078
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/link.xml.meta
@@ -0,0 +1,7 @@
+fileFormatVersion: 2
+guid: f94355fa6eab94c2d8529747b92ca3e1
+TextScriptImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes.meta
new file mode 100644
index 0000000000..6995400aec
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes.meta
@@ -0,0 +1,10 @@
+fileFormatVersion: 2
+guid: b8022add2e5264884a117894eeaf9809
+folderAsset: yes
+timeCreated: 1521595360
+licenseType: Free
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux.meta
new file mode 100644
index 0000000000..97848b1297
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux.meta
@@ -0,0 +1,10 @@
+fileFormatVersion: 2
+guid: 50c3602c6f6244621861928757e31463
+folderAsset: yes
+timeCreated: 1521595360
+licenseType: Free
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native.meta
new file mode 100644
index 0000000000..a8b33def01
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native.meta
@@ -0,0 +1,10 @@
+fileFormatVersion: 2
+guid: ba192b1e561564e1583e0a87334f8682
+folderAsset: yes
+timeCreated: 1521595360
+licenseType: Free
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x64.so b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x64.so
new file mode 100755
index 0000000000..9bf86dc2d7
Binary files /dev/null and b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x64.so differ
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x64.so.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x64.so.meta
new file mode 100644
index 0000000000..cf508374c6
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x64.so.meta
@@ -0,0 +1,113 @@
+fileFormatVersion: 2
+guid: c9d901caf522f4dc5815786fa764a5da
+PluginImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ iconMap: {}
+ executionOrder: {}
+ defineConstraints: []
+ isPreloaded: 0
+ isOverridable: 0
+ isExplicitlyReferenced: 0
+ validateReferences: 1
+ platformData:
+ - first:
+ : Any
+ second:
+ enabled: 0
+ settings:
+ Exclude Android: 1
+ Exclude CloudRendering: 1
+ Exclude Editor: 0
+ Exclude Linux: 1
+ Exclude Linux64: 0
+ Exclude LinuxUniversal: 0
+ Exclude OSXUniversal: 1
+ Exclude WebGL: 1
+ Exclude Win: 0
+ Exclude Win64: 0
+ Exclude iOS: 1
+ - first:
+ Android: Android
+ second:
+ enabled: 0
+ settings:
+ CPU: ARMv7
+ - first:
+ Any:
+ second:
+ enabled: 0
+ settings: {}
+ - first:
+ CloudRendering: CloudRendering
+ second:
+ enabled: 0
+ settings: {}
+ - first:
+ Editor: Editor
+ second:
+ enabled: 1
+ settings:
+ CPU: x86_64
+ DefaultValueInitialized: true
+ OS: Linux
+ - first:
+ Facebook: Win
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Facebook: Win64
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Linux
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ Standalone: Linux64
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: LinuxUniversal
+ second:
+ enabled: 1
+ settings:
+ CPU: x86_64
+ - first:
+ Standalone: OSXUniversal
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ Standalone: Win
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Win64
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ iPhone: iOS
+ second:
+ enabled: 0
+ settings:
+ AddToEmbeddedBinaries: false
+ CPU: AnyCPU
+ CompileFlags:
+ FrameworkDependencies:
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x86.so b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x86.so
new file mode 100755
index 0000000000..fce3041689
Binary files /dev/null and b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x86.so differ
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x86.so.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x86.so.meta
new file mode 100644
index 0000000000..a3592911d6
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/linux/native/libgrpc_csharp_ext.x86.so.meta
@@ -0,0 +1,113 @@
+fileFormatVersion: 2
+guid: 7dfb52431a6d941c89758cf0a217e3ab
+PluginImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ iconMap: {}
+ executionOrder: {}
+ defineConstraints: []
+ isPreloaded: 0
+ isOverridable: 0
+ isExplicitlyReferenced: 0
+ validateReferences: 1
+ platformData:
+ - first:
+ : Any
+ second:
+ enabled: 0
+ settings:
+ Exclude Android: 1
+ Exclude CloudRendering: 1
+ Exclude Editor: 0
+ Exclude Linux: 0
+ Exclude Linux64: 0
+ Exclude LinuxUniversal: 0
+ Exclude OSXUniversal: 1
+ Exclude WebGL: 1
+ Exclude Win: 0
+ Exclude Win64: 0
+ Exclude iOS: 1
+ - first:
+ Android: Android
+ second:
+ enabled: 0
+ settings:
+ CPU: ARMv7
+ - first:
+ Any:
+ second:
+ enabled: 0
+ settings: {}
+ - first:
+ CloudRendering: CloudRendering
+ second:
+ enabled: 0
+ settings: {}
+ - first:
+ Editor: Editor
+ second:
+ enabled: 1
+ settings:
+ CPU: x86
+ DefaultValueInitialized: true
+ OS: Linux
+ - first:
+ Facebook: Win
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Facebook: Win64
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Linux
+ second:
+ enabled: 1
+ settings:
+ CPU: x86
+ - first:
+ Standalone: Linux64
+ second:
+ enabled: 1
+ settings:
+ CPU: None
+ - first:
+ Standalone: LinuxUniversal
+ second:
+ enabled: 1
+ settings:
+ CPU: x86
+ - first:
+ Standalone: OSXUniversal
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ Standalone: Win
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Win64
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ iPhone: iOS
+ second:
+ enabled: 0
+ settings:
+ AddToEmbeddedBinaries: false
+ CPU: AnyCPU
+ CompileFlags:
+ FrameworkDependencies:
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx.meta
new file mode 100644
index 0000000000..69cbe8ef60
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx.meta
@@ -0,0 +1,10 @@
+fileFormatVersion: 2
+guid: f43fa6e62fb4c4105b270be1ae7bbbfd
+folderAsset: yes
+timeCreated: 1521595360
+licenseType: Free
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx/native.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx/native.meta
new file mode 100644
index 0000000000..24fab959db
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx/native.meta
@@ -0,0 +1,10 @@
+fileFormatVersion: 2
+guid: 55aee008fb6a3411aa96f2f9911f9207
+folderAsset: yes
+timeCreated: 1521595360
+licenseType: Free
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx/native/libgrpc_csharp_ext.x64.bundle b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx/native/libgrpc_csharp_ext.x64.bundle
new file mode 100755
index 0000000000..440d2b9e33
Binary files /dev/null and b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx/native/libgrpc_csharp_ext.x64.bundle differ
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx/native/libgrpc_csharp_ext.x64.bundle.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx/native/libgrpc_csharp_ext.x64.bundle.meta
new file mode 100644
index 0000000000..2a1f0df2f5
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/osx/native/libgrpc_csharp_ext.x64.bundle.meta
@@ -0,0 +1,137 @@
+fileFormatVersion: 2
+guid: 7eeb863bd08ba4388829c23da03a714f
+PluginImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ iconMap: {}
+ executionOrder: {}
+ defineConstraints: []
+ isPreloaded: 0
+ isOverridable: 0
+ isExplicitlyReferenced: 0
+ validateReferences: 1
+ platformData:
+ - first:
+ : Any
+ second:
+ enabled: 0
+ settings:
+ Exclude Android: 1
+ Exclude CloudRendering: 1
+ Exclude Editor: 0
+ Exclude Linux: 1
+ Exclude Linux64: 1
+ Exclude LinuxUniversal: 1
+ Exclude OSXIntel: 0
+ Exclude OSXIntel64: 0
+ Exclude OSXUniversal: 0
+ Exclude WebGL: 1
+ Exclude Win: 1
+ Exclude Win64: 1
+ Exclude iOS: 1
+ - first:
+ : OSXIntel
+ second:
+ enabled: 1
+ settings: {}
+ - first:
+ : OSXIntel64
+ second:
+ enabled: 1
+ settings: {}
+ - first:
+ Android: Android
+ second:
+ enabled: 0
+ settings:
+ CPU: ARMv7
+ - first:
+ Any:
+ second:
+ enabled: 0
+ settings: {}
+ - first:
+ CloudRendering: CloudRendering
+ second:
+ enabled: 0
+ settings: {}
+ - first:
+ Editor: Editor
+ second:
+ enabled: 1
+ settings:
+ CPU: x86_64
+ DefaultValueInitialized: true
+ OS: OSX
+ - first:
+ Facebook: Win
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Facebook: Win64
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Linux
+ second:
+ enabled: 0
+ settings:
+ CPU: x86
+ - first:
+ Standalone: Linux64
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: LinuxUniversal
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ Standalone: OSXIntel
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: OSXIntel64
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: OSXUniversal
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Win
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Win64
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ iPhone: iOS
+ second:
+ enabled: 0
+ settings:
+ AddToEmbeddedBinaries: false
+ CPU: AnyCPU
+ CompileFlags:
+ FrameworkDependencies:
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win.meta
new file mode 100644
index 0000000000..b1e54c9a48
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win.meta
@@ -0,0 +1,10 @@
+fileFormatVersion: 2
+guid: a961485c3484a4002ac4961a8481f6cc
+folderAsset: yes
+timeCreated: 1521595360
+licenseType: Free
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native.meta
new file mode 100644
index 0000000000..42e4968ae5
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native.meta
@@ -0,0 +1,10 @@
+fileFormatVersion: 2
+guid: af9f9f367bbc543b8ba41e58dcdd6e66
+folderAsset: yes
+timeCreated: 1521595360
+licenseType: Free
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x64.dll b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x64.dll
new file mode 100755
index 0000000000..b2e48711b8
Binary files /dev/null and b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x64.dll differ
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x64.dll.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x64.dll.meta
new file mode 100644
index 0000000000..56500dc9b6
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x64.dll.meta
@@ -0,0 +1,105 @@
+fileFormatVersion: 2
+guid: f4d9429fe43154fbd9d158c129e0ff33
+PluginImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ iconMap: {}
+ executionOrder: {}
+ defineConstraints: []
+ isPreloaded: 0
+ isOverridable: 0
+ isExplicitlyReferenced: 0
+ validateReferences: 1
+ platformData:
+ - first:
+ : Any
+ second:
+ enabled: 0
+ settings:
+ Exclude Android: 1
+ Exclude Editor: 0
+ Exclude Linux: 0
+ Exclude Linux64: 0
+ Exclude LinuxUniversal: 0
+ Exclude OSXUniversal: 0
+ Exclude Win: 1
+ Exclude Win64: 0
+ Exclude iOS: 1
+ - first:
+ Android: Android
+ second:
+ enabled: 0
+ settings:
+ CPU: ARMv7
+ - first:
+ Any:
+ second:
+ enabled: 0
+ settings: {}
+ - first:
+ Editor: Editor
+ second:
+ enabled: 1
+ settings:
+ CPU: x86_64
+ DefaultValueInitialized: true
+ OS: Windows
+ - first:
+ Facebook: Win
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ Facebook: Win64
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Linux
+ second:
+ enabled: 1
+ settings:
+ CPU: x86
+ - first:
+ Standalone: Linux64
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: LinuxUniversal
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: OSXUniversal
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Win
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ Standalone: Win64
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ iPhone: iOS
+ second:
+ enabled: 0
+ settings:
+ AddToEmbeddedBinaries: false
+ CompileFlags:
+ FrameworkDependencies:
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x86.dll b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x86.dll
new file mode 100755
index 0000000000..45d5c324a3
Binary files /dev/null and b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x86.dll differ
diff --git a/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x86.dll.meta b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x86.dll.meta
new file mode 100644
index 0000000000..77354acf46
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/ProtoBuffer/runtimes/win/native/grpc_csharp_ext.x86.dll.meta
@@ -0,0 +1,105 @@
+fileFormatVersion: 2
+guid: d74134114def74fb4ae781c015deaa95
+PluginImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ iconMap: {}
+ executionOrder: {}
+ defineConstraints: []
+ isPreloaded: 0
+ isOverridable: 0
+ isExplicitlyReferenced: 0
+ validateReferences: 1
+ platformData:
+ - first:
+ : Any
+ second:
+ enabled: 0
+ settings:
+ Exclude Android: 1
+ Exclude Editor: 0
+ Exclude Linux: 0
+ Exclude Linux64: 0
+ Exclude LinuxUniversal: 0
+ Exclude OSXUniversal: 0
+ Exclude Win: 0
+ Exclude Win64: 1
+ Exclude iOS: 1
+ - first:
+ Android: Android
+ second:
+ enabled: 0
+ settings:
+ CPU: ARMv7
+ - first:
+ Any:
+ second:
+ enabled: 0
+ settings: {}
+ - first:
+ Editor: Editor
+ second:
+ enabled: 1
+ settings:
+ CPU: x86
+ DefaultValueInitialized: true
+ OS: Windows
+ - first:
+ Facebook: Win
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ - first:
+ Facebook: Win64
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ Standalone: Linux
+ second:
+ enabled: 1
+ settings:
+ CPU: x86
+ - first:
+ Standalone: Linux64
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: LinuxUniversal
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: OSXUniversal
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Win
+ second:
+ enabled: 1
+ settings:
+ CPU: AnyCPU
+ - first:
+ Standalone: Win64
+ second:
+ enabled: 0
+ settings:
+ CPU: None
+ - first:
+ iPhone: iOS
+ second:
+ enabled: 0
+ settings:
+ AddToEmbeddedBinaries: false
+ CompileFlags:
+ FrameworkDependencies:
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/System.IO.Abstractions.TestingHelpers.dll b/com.unity.ml-agents/Plugins/System.IO.Abstractions.TestingHelpers.dll
new file mode 100755
index 0000000000..0d2b68f2e8
Binary files /dev/null and b/com.unity.ml-agents/Plugins/System.IO.Abstractions.TestingHelpers.dll differ
diff --git a/com.unity.ml-agents/Plugins/System.IO.Abstractions.TestingHelpers.dll.meta b/com.unity.ml-agents/Plugins/System.IO.Abstractions.TestingHelpers.dll.meta
new file mode 100644
index 0000000000..c6d910d8ee
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/System.IO.Abstractions.TestingHelpers.dll.meta
@@ -0,0 +1,33 @@
+fileFormatVersion: 2
+guid: 2d7ba4e1037b64de5b860bcbe15755b3
+PluginImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ iconMap: {}
+ executionOrder: {}
+ defineConstraints: []
+ isPreloaded: 0
+ isOverridable: 0
+ isExplicitlyReferenced: 0
+ validateReferences: 1
+ platformData:
+ - first:
+ Any:
+ second:
+ enabled: 1
+ settings: {}
+ - first:
+ Editor: Editor
+ second:
+ enabled: 0
+ settings:
+ DefaultValueInitialized: true
+ - first:
+ Windows Store Apps: WindowsStoreApps
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Plugins/System.IO.Abstractions.dll b/com.unity.ml-agents/Plugins/System.IO.Abstractions.dll
new file mode 100755
index 0000000000..4fe6ccbf43
Binary files /dev/null and b/com.unity.ml-agents/Plugins/System.IO.Abstractions.dll differ
diff --git a/com.unity.ml-agents/Plugins/System.IO.Abstractions.dll.meta b/com.unity.ml-agents/Plugins/System.IO.Abstractions.dll.meta
new file mode 100644
index 0000000000..5432c24e8a
--- /dev/null
+++ b/com.unity.ml-agents/Plugins/System.IO.Abstractions.dll.meta
@@ -0,0 +1,33 @@
+fileFormatVersion: 2
+guid: b01205587773841ad95e8ceda347e8bd
+PluginImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ iconMap: {}
+ executionOrder: {}
+ defineConstraints: []
+ isPreloaded: 0
+ isOverridable: 0
+ isExplicitlyReferenced: 0
+ validateReferences: 1
+ platformData:
+ - first:
+ Any:
+ second:
+ enabled: 1
+ settings: {}
+ - first:
+ Editor: Editor
+ second:
+ enabled: 0
+ settings:
+ DefaultValueInitialized: true
+ - first:
+ Windows Store Apps: WindowsStoreApps
+ second:
+ enabled: 0
+ settings:
+ CPU: AnyCPU
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/README.md b/com.unity.ml-agents/README.md
new file mode 100644
index 0000000000..ae5c0e2c8e
--- /dev/null
+++ b/com.unity.ml-agents/README.md
@@ -0,0 +1,15 @@
+# com.unity.ml-agents
+
+ML-Agents is a Unity package that lets you apply state-of-the-art machine learning to create intelligent character behaviors in any Unity environment (games, robotics, film, etc.).
+
+## Installation
+
+Please refer to the [ML-Agents github repo] for installation instructions.
+
+## Usage
+
+Please refer to the [ML-Agents documentation] page for usage guides.
+
+
+[ML-Agents github repo]: https://github.com/Unity-Technologies/ml-agents
+[ML-Agents documentation]: https://unity-technologies.github.io/ml-agents/
\ No newline at end of file
diff --git a/com.unity.ml-agents/README.md.meta b/com.unity.ml-agents/README.md.meta
new file mode 100644
index 0000000000..bbb2279ba2
--- /dev/null
+++ b/com.unity.ml-agents/README.md.meta
@@ -0,0 +1,7 @@
+fileFormatVersion: 2
+guid: 940521c5d10354cde82c2d572d170c97
+TextScriptImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime.meta b/com.unity.ml-agents/Runtime.meta
new file mode 100644
index 0000000000..b5ab5034ab
--- /dev/null
+++ b/com.unity.ml-agents/Runtime.meta
@@ -0,0 +1,8 @@
+fileFormatVersion: 2
+guid: a3a287cfa95bf4bdcad4997f7d48153b
+folderAsset: yes
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Academy.cs b/com.unity.ml-agents/Runtime/Academy.cs
new file mode 100644
index 0000000000..515ccae2bf
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Academy.cs
@@ -0,0 +1,695 @@
+using System;
+using UnityEngine;
+using System.Collections.Generic;
+#if UNITY_EDITOR
+using UnityEditor;
+#endif
+using Unity.MLAgents.Actuators;
+using Unity.MLAgents.Inference;
+using Unity.MLAgents.Policies;
+using Unity.MLAgents.SideChannels;
+using Unity.Barracuda;
+
+/**
+ * Welcome to Unity Machine Learning Agents (ML-Agents).
+ *
+ * The ML-Agents toolkit contains four entities: Academy, Agent, Communicator and
+ * Python API. The academy and connected agents live within
+ * a learning environment (herein called Environment), while the communicator
+ * manages the communication between the learning environment and the Python
+ * API. For more information on each of these entities, in addition to how to
+ * set-up a learning environment and train the behavior of characters in a
+ * Unity scene, please browse our documentation pages on GitHub:
+ * https://github.com/Unity-Technologies/ml-agents/tree/release_19_docs/docs/
+ */
+
+namespace Unity.MLAgents
+{
+    /// <summary>
+    /// Helper class to step the Academy during FixedUpdate phase.
+    /// </summary>
+ internal class AcademyFixedUpdateStepper : MonoBehaviour
+ {
+ void FixedUpdate()
+ {
+            // Destroy the stepper if it does not belong to the current Academy.
+            // This prevents leaked steppers from previous runs from lingering.
+ if (!Academy.IsInitialized || !Academy.Instance.IsStepperOwner(this))
+ {
+ Destroy(this.gameObject);
+ }
+ else
+ {
+ Academy.Instance.EnvironmentStep();
+ }
+ }
+ }
+
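+    // Usage note (sketch; assumes the Academy.AutomaticSteppingEnabled property, which is
+    // not shown in this diff): automatic stepping can be disabled by setting it to false,
+    // in which case user code drives the simulation by calling
+    // Academy.Instance.EnvironmentStep() from its own loop instead of this stepper.
+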
+    /// <summary>
+    /// The Academy singleton manages agent training and decision making.
+    /// </summary>
+    /// <remarks>
+    /// Access the Academy singleton through the Instance
+    /// property. The Academy instance is initialized the first time it is accessed (which will
+    /// typically be by the first Agent initialized in a scene).
+    ///
+    /// At initialization, the Academy attempts to connect to the Python training process through
+    /// the external communicator. If successful, the training process can train Agent
+    /// instances. When you set an agent's BehaviorType setting
+    /// to Default, the agent exchanges data with the training process
+    /// to make decisions. If no training process is available, agents with the default behavior
+    /// fall back to inference or heuristic decisions. (You can also set agents to always use
+    /// inference or heuristics.)
+    /// </remarks>
+ [HelpURL("https://github.com/Unity-Technologies/ml-agents/tree/release_19_docs/" +
+ "docs/Learning-Environment-Design.md")]
+ public class Academy : IDisposable
+ {
+        /// <summary>
+        /// Communication protocol version.
+        /// When connecting to python, this must be compatible with UnityEnvironment.API_VERSION.
+        /// We follow semantic versioning on the communication version, so existing
+        /// functionality will work as long as the major versions match.
+        /// This should be changed whenever a change is made to the communication protocol.
+        /// </summary>
+        /// <remarks>
+        /// History:
+        /// - 1.0.0: Initial version
+        /// - 1.1.0: Support concatenated PNGs for compressed observations.
+        /// - 1.2.0: Support compression mapping for stacked compressed observations.
+        /// - 1.3.0: Support both continuous and discrete actions.
+        /// - 1.4.0: Support training analytics sent from python trainer to the editor.
+        /// - 1.5.0: Support variable length observation training and multi-agent groups.
+        /// </remarks>
+ const string k_ApiVersion = "1.5.0";
+
+        /// <summary>
+        /// Unity package version of com.unity.ml-agents.
+        /// This must match the version string in package.json and is checked in a unit test.
+        /// </summary>
+ internal const string k_PackageVersion = "2.3.0-exp.3";
+
+ const int k_EditorTrainingPort = 5004;
+
+ const string k_PortCommandLineFlag = "--mlagents-port";
+
+ // Lazy initializer pattern, see https://csharpindepth.com/articles/singleton#lazy
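+        // Lazy<T> defers construction until s_Lazy.Value is first read and is thread-safe
+        // by default, so the Academy is created exactly once, on the first access to
+        // Academy.Instance.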
+        static Lazy<Academy> s_Lazy = new Lazy<Academy>(() => new Academy());
+
+        /// <summary>
+        /// Reports whether the Academy has been initialized yet.
+        /// </summary>
+        /// <value>True if the Academy is initialized, false otherwise.</value>
+ public static bool IsInitialized
+ {
+ get { return s_Lazy.IsValueCreated; }
+ }
+
+        /// <summary>
+        /// The singleton Academy object.
+        /// </summary>
+        /// <remarks>Getting the instance initializes the Academy, if necessary.</remarks>
+ public static Academy Instance { get { return s_Lazy.Value; } }
+
+ // Fields not provided in the Inspector.
+
+        /// <summary>
+        /// Reports whether or not the communicator is on.
+        /// </summary>
+        /// <value>
+        /// True, if communicator is on, false otherwise.
+        /// </value>
+ public bool IsCommunicatorOn
+ {
+ get { return Communicator != null; }
+ }
+
+ /// The number of episodes completed by the environment. Incremented
+ /// each time the environment is reset.
+ int m_EpisodeCount;
+
+        /// The number of steps completed within the current episode. Incremented
+        /// each time a step is taken in the environment. Reset to 0 each time
+        /// the environment is reset.
+ int m_StepCount;
+
+        /// The total number of steps completed during the whole simulation. Incremented
+ /// each time a step is taken in the environment.
+ int m_TotalStepCount;
+
+ /// Pointer to the communicator currently in use by the Academy.
+ internal ICommunicator Communicator;
+
+ bool m_Initialized;
+ List<ModelRunner> m_ModelRunners = new List<ModelRunner>();
+
+ // Flag used to keep track of the first time the Academy is reset.
+ bool m_HadFirstReset;
+
+ // Detect an Academy step called by user code that is also called by the Academy.
+ private RecursionChecker m_StepRecursionChecker = new RecursionChecker("EnvironmentStep");
+
+ // Random seed used for inference.
+ int m_InferenceSeed;
+
+ /// <summary>
+ /// Set the random seed used for inference. This should be set before any Agents are added
+ /// to the scene. The seed is passed to the ModelRunner constructor, and incremented each
+ /// time a new ModelRunner is created.
+ /// </summary>
+ public int InferenceSeed
+ {
+ set { m_InferenceSeed = value; }
+ }
+
+ int m_NumAreas;
+
+ /// <summary>
+ /// Number of training areas to instantiate.
+ /// </summary>
+ public int NumAreas => m_NumAreas;
+
+ /// <summary>
+ /// Returns the RLCapabilities of the python client that the unity process is connected to.
+ /// </summary>
+ internal UnityRLCapabilities TrainerCapabilities { get; set; }
+
+
+ // The Academy uses a series of events to communicate with agents
+ // to facilitate synchronization. More specifically, it ensures
+ // that all the agents perform their steps in a consistent order (i.e. no
+ // agent can act based on a decision before another agent has had a chance
+ // to request a decision).
+
+ // Signals to all the Agents at each environment step so they can use
+ // their Policy to decide on their next action.
+ internal event Action DecideAction;
+
+ // Signals to all the listeners that the academy is being destroyed
+ internal event Action DestroyAction;
+
+ // Signals to the Agent that a new step is about to start.
+ // This will mark the Agent as Done if it has reached its maxSteps.
+ internal event Action AgentIncrementStep;
+
+
+ /// <summary>
+ /// Signals to all of the <see cref="Agent"/>s that their step is about to begin.
+ /// This is a good time for an <see cref="Agent"/> to decide if it would like to
+ /// call <see cref="Agent.RequestDecision"/> or <see cref="Agent.RequestAction"/>
+ /// for this step. Any other pre-step setup could be done during this event as well.
+ /// </summary>
+ public event Action AgentPreStep;
+
+ // Signals to all the agents at each environment step so they can send
+ // their state to their Policy if they have requested a decision.
+ internal event Action AgentSendState;
+
+ // Signals to all the agents at each environment step so they can act if
+ // they have requested a decision.
+ internal event Action AgentAct;
+
+ // Signals to all the agents each time the Academy force resets.
+ internal event Action AgentForceReset;
+
+ /// <summary>
+ /// Signals that the Academy has been reset by the training process.
+ /// </summary>
+ public event Action OnEnvironmentReset;
+
+ AcademyFixedUpdateStepper m_FixedUpdateStepper;
+ GameObject m_StepperObject;
+
+
+ /// <summary>
+ /// Private constructor called the first time the Academy is used.
+ /// Academy uses this time to initialize internal data
+ /// structures, initialize the environment and check for the existence
+ /// of a communicator.
+ /// </summary>
+ protected Academy()
+ {
+ Application.quitting += Dispose;
+#if UNITY_EDITOR || UNITY_STANDALONE
+ if (!CommunicatorFactory.CommunicatorRegistered)
+ {
+ Debug.Log("Registered Communicator in Academy.");
+ CommunicatorFactory.Register(RpcCommunicator.Create);
+ }
+#endif
+ LazyInitialize();
+
+#if UNITY_EDITOR
+ EditorApplication.playModeStateChanged += HandleOnPlayModeChanged;
+#endif
+ }
+
+#if UNITY_EDITOR
+ /// <summary>
+ /// Clean up the Academy when switching from edit mode to play mode.
+ /// </summary>
+ /// <param name="state">State.</param>
+ void HandleOnPlayModeChanged(PlayModeStateChange state)
+ {
+ if (state == PlayModeStateChange.ExitingEditMode)
+ {
+ Dispose();
+ }
+ }
+
+#endif
+
+ /// <summary>
+ /// Initialize the Academy if it hasn't already been initialized.
+ /// This method is always safe to call; it will have no effect if the Academy is already
+ /// initialized.
+ /// </summary>
+ internal void LazyInitialize()
+ {
+ if (!m_Initialized)
+ {
+ InitializeEnvironment();
+ m_Initialized = true;
+ }
+ }
+
+ /// <summary>
+ /// Enable stepping of the Academy during the FixedUpdate phase. This is done by creating
+ /// a temporary GameObject with a MonoBehaviour that calls Academy.EnvironmentStep().
+ /// </summary>
+ void EnableAutomaticStepping()
+ {
+ if (m_FixedUpdateStepper != null)
+ {
+ return;
+ }
+
+ m_StepperObject = new GameObject("AcademyFixedUpdateStepper");
+ // Don't show this object in the hierarchy
+ m_StepperObject.hideFlags = HideFlags.HideInHierarchy;
+ m_FixedUpdateStepper = m_StepperObject.AddComponent<AcademyFixedUpdateStepper>();
+ try
+ {
+ // This try-catch is because DontDestroyOnLoad cannot be used in Editor Tests
+ GameObject.DontDestroyOnLoad(m_StepperObject);
+ }
+ catch { }
+ }
+
+ /// <summary>
+ /// Disable stepping of the Academy during the FixedUpdate phase. If this is called, the Academy must be
+ /// stepped manually by the user by calling Academy.EnvironmentStep().
+ /// </summary>
+ void DisableAutomaticStepping()
+ {
+ if (m_FixedUpdateStepper == null)
+ {
+ return;
+ }
+
+ m_FixedUpdateStepper = null;
+ if (Application.isEditor)
+ {
+ UnityEngine.Object.DestroyImmediate(m_StepperObject);
+ }
+ else
+ {
+ UnityEngine.Object.Destroy(m_StepperObject);
+ }
+
+ m_StepperObject = null;
+ }
+
+ /// <summary>
+ /// Determines whether or not the Academy is automatically stepped during the FixedUpdate phase.
+ /// </summary>
+ /// <value>Set true to enable automatic stepping; false to disable.</value>
+ public bool AutomaticSteppingEnabled
+ {
+ get { return m_FixedUpdateStepper != null; }
+ set
+ {
+ if (value)
+ {
+ EnableAutomaticStepping();
+ }
+ else
+ {
+ DisableAutomaticStepping();
+ }
+ }
+ }
+
+ // Reads the port used to connect to the trainer from the command-line arguments.
+ static int ReadPortFromArgs()
+ {
+ var args = Environment.GetCommandLineArgs();
+ var inputPort = "";
+ for (var i = 0; i < args.Length; i++)
+ {
+ if (args[i] == k_PortCommandLineFlag)
+ {
+ inputPort = args[i + 1];
+ }
+ }
+
+ try
+ {
+ return int.Parse(inputPort);
+ }
+ catch
+ {
+ // No arg passed, or malformed port number.
+#if UNITY_EDITOR
+ // Try connecting on the default editor port
+ return MLAgentsSettingsManager.Settings.ConnectTrainer ? MLAgentsSettingsManager.Settings.EditorPort : -1;
+#else
+ // This is an executable, so we don't try to connect.
+ return -1;
+#endif
+ }
+ }
+
+ EnvironmentParameters m_EnvironmentParameters;
+ StatsRecorder m_StatsRecorder;
+
+ /// <summary>
+ /// Returns the <see cref="EnvironmentParameters"/> instance. If training
+ /// features such as Curriculum Learning or Environment Parameter Randomization are used,
+ /// then the values of the parameters generated from the training process can be
+ /// retrieved here.
+ /// </summary>
+ /// <returns>The <see cref="EnvironmentParameters"/> instance.</returns>
+ public EnvironmentParameters EnvironmentParameters
+ {
+ get { return m_EnvironmentParameters; }
+ }
+
+ /// <summary>
+ /// Returns the <see cref="StatsRecorder"/> instance. This instance can be used
+ /// to record any statistics from the Unity environment.
+ /// </summary>
+ /// <returns>The <see cref="StatsRecorder"/> instance.</returns>
+ public StatsRecorder StatsRecorder
+ {
+ get { return m_StatsRecorder; }
+ }
+
+ /// <summary>
+ /// Initializes the environment, configures it and initializes the Academy.
+ /// </summary>
+ void InitializeEnvironment()
+ {
+ TimerStack.Instance.AddMetadata("communication_protocol_version", k_ApiVersion);
+ TimerStack.Instance.AddMetadata("com.unity.ml-agents_version", k_PackageVersion);
+
+ EnableAutomaticStepping();
+
+ SideChannelManager.RegisterSideChannel(new EngineConfigurationChannel());
+ SideChannelManager.RegisterSideChannel(new TrainingAnalyticsSideChannel());
+ m_EnvironmentParameters = new EnvironmentParameters();
+ m_StatsRecorder = new StatsRecorder();
+
+ // Try to launch the communicator by using the arguments passed at launch
+ var port = ReadPortFromArgs();
+ if (port > 0)
+ {
+ Communicator = CommunicatorFactory.Create();
+ }
+
+ if (Communicator == null && CommunicatorFactory.Enabled && port > 0)
+ {
+ Debug.Log("Communicator failed to start!");
+ }
+
+ if (Communicator != null)
+ {
+ // We try to exchange the first message with Python. If this fails, it means
+ // no Python Process is ready to train the environment. In this case, the
+ // environment must use Inference.
+ bool initSuccessful = false;
+ var communicatorInitParams = new CommunicatorInitParameters
+ {
+ port = port,
+ unityCommunicationVersion = k_ApiVersion,
+ unityPackageVersion = k_PackageVersion,
+ name = "AcademySingleton",
+ CSharpCapabilities = new UnityRLCapabilities()
+ };
+
+ try
+ {
+ initSuccessful = Communicator.Initialize(
+ communicatorInitParams,
+ out var unityRlInitParameters
+ );
+ if (initSuccessful)
+ {
+ UnityEngine.Random.InitState(unityRlInitParameters.seed);
+ // We might have inference-only Agents, so set the seed for them too.
+ m_InferenceSeed = unityRlInitParameters.seed;
+ m_NumAreas = unityRlInitParameters.numAreas;
+ TrainerCapabilities = unityRlInitParameters.TrainerCapabilities;
+ TrainerCapabilities.WarnOnPythonMissingBaseRLCapabilities();
+ }
+ else
+ {
+ Debug.Log($"Couldn't connect to trainer on port {port} using API version {k_ApiVersion}. Will perform inference instead.");
+ Communicator = null;
+ }
+ }
+ catch (Exception ex)
+ {
+ Debug.Log($"Unexpected exception when trying to initialize communication: {ex}\nWill perform inference instead.");
+ Communicator = null;
+ }
+ }
+
+ if (Communicator != null)
+ {
+ Communicator.QuitCommandReceived += OnQuitCommandReceived;
+ Communicator.ResetCommandReceived += OnResetCommand;
+ }
+
+ // If a communicator is enabled/provided, then we assume we are in
+ // training mode. In the absence of a communicator, we assume we are
+ // in inference mode.
+
+ ResetActions();
+ }
+
+ void ResetActions()
+ {
+ DecideAction = () => { };
+ DestroyAction = () => { };
+ AgentPreStep = i => { };
+ AgentSendState = () => { };
+ AgentAct = () => { };
+ AgentForceReset = () => { };
+ OnEnvironmentReset = () => { };
+ }
+
+ static void OnQuitCommandReceived()
+ {
+#if UNITY_EDITOR
+ EditorApplication.isPlaying = false;
+#endif
+ Application.Quit();
+ }
+
+ void OnResetCommand()
+ {
+ ForcedFullReset();
+ }
+
+ /// <summary>
+ /// The current episode count.
+ /// </summary>
+ /// <value>
+ /// Current episode number.
+ /// </value>
+ public int EpisodeCount
+ {
+ get { return m_EpisodeCount; }
+ }
+
+ /// <summary>
+ /// The current step count (within the current episode).
+ /// </summary>
+ /// <value>
+ /// Current step count.
+ /// </value>
+ public int StepCount
+ {
+ get { return m_StepCount; }
+ }
+
+ /// <summary>
+ /// Returns the total step count.
+ /// </summary>
+ /// <value>
+ /// Total step count.
+ /// </value>
+ public int TotalStepCount
+ {
+ get { return m_TotalStepCount; }
+ }
+
+ /// <summary>
+ /// Forces a full environment reset. The done flags are not affected. Called
+ /// for the first reset at inference and for every external reset
+ /// during training.
+ /// </summary>
+ void ForcedFullReset()
+ {
+ EnvironmentReset();
+ AgentForceReset?.Invoke();
+ m_HadFirstReset = true;
+ }
+
+ /// <summary>
+ /// Performs a single environment update of the Academy and Agent
+ /// objects within the environment.
+ /// </summary>
+ public void EnvironmentStep()
+ {
+ using (m_StepRecursionChecker.Start())
+ {
+ if (!m_HadFirstReset)
+ {
+ ForcedFullReset();
+ }
+
+ AgentPreStep?.Invoke(m_StepCount);
+
+ m_StepCount += 1;
+ m_TotalStepCount += 1;
+ AgentIncrementStep?.Invoke();
+
+ using (TimerStack.Instance.Scoped("AgentSendState"))
+ {
+ AgentSendState?.Invoke();
+ }
+
+ using (TimerStack.Instance.Scoped("DecideAction"))
+ {
+ DecideAction?.Invoke();
+ }
+
+ // If the communicator is not on, we need to clear the SideChannel sending queue
+ if (!IsCommunicatorOn)
+ {
+ SideChannelManager.GetSideChannelMessage();
+ }
+
+ using (TimerStack.Instance.Scoped("AgentAct"))
+ {
+ AgentAct?.Invoke();
+ }
+ }
+ }
+
+ /// <summary>
+ /// Resets the environment, including the Academy.
+ /// </summary>
+ void EnvironmentReset()
+ {
+ m_StepCount = 0;
+ m_EpisodeCount++;
+ OnEnvironmentReset?.Invoke();
+ }
+
+ /// <summary>
+ /// Creates or retrieves an existing ModelRunner that uses the same
+ /// NNModel and the InferenceDevice as provided.
+ /// </summary>
+ /// <param name="model">The NNModel the ModelRunner must use.</param>
+ /// <param name="actionSpec">Description of the actions for the Agent.</param>
+ /// <param name="inferenceDevice">
+ /// The inference device (CPU or GPU) the ModelRunner will use.
+ /// </param>
+ /// <param name="deterministicInference">Inference only: set to true if the action selection from model should be
+ /// deterministic.</param>
+ /// <returns>The ModelRunner compatible with the input settings.</returns>
+ internal ModelRunner GetOrCreateModelRunner(
+ NNModel model, ActionSpec actionSpec, InferenceDevice inferenceDevice, bool deterministicInference = false)
+ {
+ var modelRunner = m_ModelRunners.Find(x => x.HasModel(model, inferenceDevice));
+ if (modelRunner == null)
+ {
+ modelRunner = new ModelRunner(model, actionSpec, inferenceDevice, m_InferenceSeed, deterministicInference);
+ m_ModelRunners.Add(modelRunner);
+ m_InferenceSeed++;
+ }
+ return modelRunner;
+ }
+
+ /// <summary>
+ /// Shut down the Academy.
+ /// </summary>
+ public void Dispose()
+ {
+ DisableAutomaticStepping();
+
+ // Signal to listeners that the academy is being destroyed now
+ DestroyAction?.Invoke();
+
+ Communicator?.Dispose();
+ Communicator = null;
+
+ m_EnvironmentParameters.Dispose();
+ m_StatsRecorder.Dispose();
+ SideChannelManager.UnregisterAllSideChannels(); // unregister custom side channels
+
+ if (m_ModelRunners != null)
+ {
+ foreach (var mr in m_ModelRunners)
+ {
+ mr.Dispose();
+ }
+
+ m_ModelRunners = null;
+ }
+
+ // Clear out the actions so we're not keeping references to any old objects
+ ResetActions();
+
+ // TODO - Pass worker ID or some other identifier,
+ // so that multiple envs won't overwrite each others stats.
+ TimerStack.Instance.SaveJsonTimers();
+ m_Initialized = false;
+
+ // Reset the Lazy instance
+ s_Lazy = new Lazy<Academy>(() => new Academy());
+ }
+
+ /// <summary>
+ /// Check if the input AcademyFixedUpdateStepper belongs to this Academy.
+ /// </summary>
+ internal bool IsStepperOwner(AcademyFixedUpdateStepper stepper)
+ {
+ return GameObject.ReferenceEquals(stepper.gameObject, Academy.Instance.m_StepperObject);
+ }
+ }
+}
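As a usage note for the class above: the Academy normally steps itself from the hidden stepper object created in `EnableAutomaticStepping()`, and `AutomaticSteppingEnabled` lets game code take over the loop. A minimal sketch of manual stepping (the `ManualStepper` MonoBehaviour below is hypothetical, not part of this patch):

```csharp
using Unity.MLAgents;
using UnityEngine;

// Hypothetical example: drive the Academy manually instead of relying on
// the AcademyFixedUpdateStepper created by EnableAutomaticStepping().
public class ManualStepper : MonoBehaviour
{
    void Start()
    {
        // Accessing Instance lazily initializes the Academy (s_Lazy above).
        Academy.Instance.AutomaticSteppingEnabled = false;
    }

    void FixedUpdate()
    {
        // Each call runs one decision/action cycle: AgentPreStep,
        // AgentSendState, DecideAction, then AgentAct.
        Academy.Instance.EnvironmentStep();
    }

    void OnDestroy()
    {
        if (Academy.IsInitialized)
        {
            Academy.Instance.AutomaticSteppingEnabled = true;
        }
    }
}
```

Manual stepping like this is useful when the decision cadence must align with custom physics or turn-based game logic.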
diff --git a/com.unity.ml-agents/Runtime/Academy.cs.meta b/com.unity.ml-agents/Runtime/Academy.cs.meta
new file mode 100755
index 0000000000..b0a5b6ffc4
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Academy.cs.meta
@@ -0,0 +1,12 @@
+fileFormatVersion: 2
+guid: b1fc0029fee784d9cb9854f8912bfd07
+timeCreated: 1503613254
+licenseType: Free
+MonoImporter:
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Actuators.meta b/com.unity.ml-agents/Runtime/Actuators.meta
new file mode 100644
index 0000000000..96bbfb99b3
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators.meta
@@ -0,0 +1,8 @@
+fileFormatVersion: 2
+guid: 26733e59183b6479e8f0e892a8bf09a4
+folderAsset: yes
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActionSegment.cs b/com.unity.ml-agents/Runtime/Actuators/ActionSegment.cs
new file mode 100644
index 0000000000..b3026eac39
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/ActionSegment.cs
@@ -0,0 +1,234 @@
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+
+namespace Unity.MLAgents.Actuators
+{
+ /// <summary>
+ /// ActionSegment{T} is a data structure that allows access to a segment of an underlying array
+ /// in order to avoid the copying and allocation of sub-arrays. The segment is defined by
+ /// the offset into the original array, and a length.
+ /// </summary>
+ /// <typeparam name="T">The type of object stored in the underlying <see cref="Array"/>.</typeparam>
+ public readonly struct ActionSegment<T> : IEnumerable<T>, IEquatable<ActionSegment<T>>
+ where T : struct
+ {
+ /// <summary>
+ /// The zero-based offset into the original array at which this segment starts.
+ /// </summary>
+ public readonly int Offset;
+
+ /// <summary>
+ /// The number of items this segment can access in the underlying array.
+ /// </summary>
+ public readonly int Length;
+
+ /// <summary>
+ /// An Empty segment which has an offset of 0, a Length of 0, and its underlying array
+ /// is also empty.
+ /// </summary>
+ public static ActionSegment<T> Empty = new ActionSegment<T>(System.Array.Empty<T>(), 0, 0);
+
+ static void CheckParameters(IReadOnlyCollection<T> actionArray, int offset, int length)
+ {
+#if DEBUG
+ if (offset + length > actionArray.Count)
+ {
+ throw new ArgumentOutOfRangeException(nameof(offset),
+ $"Arguments offset: {offset} and length: {length} " +
+ $"are out of bounds of actionArray: {actionArray.Count}.");
+ }
+#endif
+ }
+
+ /// <summary>
+ /// Construct an <see cref="ActionSegment{T}"/> with just an actionArray. The <see cref="Offset"/> will
+ /// be set to 0 and the <see cref="Length"/> will be set to `actionArray.Length`.
+ /// </summary>
+ /// <param name="actionArray">The action array to use for this segment.</param>
+ public ActionSegment(T[] actionArray)
+ : this(actionArray ?? System.Array.Empty<T>(), 0, actionArray?.Length ?? 0) { }
+
+ /// <summary>
+ /// Construct an <see cref="ActionSegment{T}"/> with an underlying array,
+ /// an offset, and a length.
+ /// </summary>
+ /// <param name="actionArray">The underlying array which this segment has a view into.</param>
+ /// <param name="offset">The zero-based offset into the underlying array.</param>
+ /// <param name="length">The length of the segment.</param>
+ public ActionSegment(T[] actionArray, int offset, int length)
+ {
+#if DEBUG
+ CheckParameters(actionArray ?? System.Array.Empty<T>(), offset, length);
+#endif
+ Array = actionArray ?? System.Array.Empty<T>();
+ Offset = offset;
+ Length = length;
+ }
+
+ /// <summary>
+ /// Get the underlying <see cref="Array"/> of this segment.
+ /// </summary>
+ public T[] Array { get; }
+
+ /// <summary>
+ /// Allows access to the underlying array using array syntax.
+ /// </summary>
+ /// <param name="index">The zero-based index of the segment.</param>
+ /// <exception cref="IndexOutOfRangeException">Thrown when the index is less than 0 or
+ /// greater than or equal to <see cref="Length"/>.</exception>
+ public T this[int index]
+ {
+ get
+ {
+ if (index < 0 || index >= Length)
+ {
+ throw new IndexOutOfRangeException($"Index out of bounds, expected a number between 0 and {Length}");
+ }
+ return Array[Offset + index];
+ }
+ set
+ {
+ if (index < 0 || index >= Length)
+ {
+ throw new IndexOutOfRangeException($"Index out of bounds, expected a number between 0 and {Length}");
+ }
+ Array[Offset + index] = value;
+ }
+ }
+
+ /// <summary>
+ /// Sets the segment of the backing array to all zeros.
+ /// </summary>
+ public void Clear()
+ {
+ System.Array.Clear(Array, Offset, Length);
+ }
+
+ /// <summary>
+ /// Check if the segment is empty.
+ /// </summary>
+ /// <returns>Whether or not the segment is empty.</returns>
+ public bool IsEmpty()
+ {
+ return Array == null || Array.Length == 0;
+ }
+
+ /// <summary>
+ /// Returns an enumerator that iterates through the ActionSegment.
+ /// </summary>
+ /// <returns>An IEnumerator object that can be used to iterate through the ActionSegment.</returns>
+ IEnumerator IEnumerable.GetEnumerator()
+ {
+ return new Enumerator(this);
+ }
+
+ /// <summary>
+ /// Returns an enumerator that iterates through the ActionSegment.
+ /// </summary>
+ /// <returns>An IEnumerator object that can be used to iterate through the ActionSegment.</returns>
+ public IEnumerator<T> GetEnumerator()
+ {
+ return new Enumerator(this);
+ }
+
+ /// <summary>
+ /// Indicates whether the current ActionSegment is equal to another ActionSegment.
+ /// </summary>
+ /// <param name="obj">An ActionSegment to compare with this ActionSegment.</param>
+ /// <returns>true if the current ActionSegment is equal to the other parameter; otherwise, false.</returns>
+ public override bool Equals(object obj)
+ {
+ if (!(obj is ActionSegment<T>))
+ {
+ return false;
+ }
+ return Equals((ActionSegment<T>)obj);
+ }
+
+ /// <summary>
+ /// Indicates whether the current ActionSegment is equal to another ActionSegment.
+ /// </summary>
+ /// <param name="other">An ActionSegment to compare with this ActionSegment.</param>
+ /// <returns>true if the current ActionSegment is equal to the other parameter; otherwise, false.</returns>
+ public bool Equals(ActionSegment<T> other)
+ {
+ return Offset == other.Offset && Length == other.Length && Array.SequenceEqual(other.Array);
+ }
+
+ /// <summary>
+ /// Computes the hash code of the ActionSegment.
+ /// </summary>
+ /// <returns>A hash code for the current ActionSegment.</returns>
+ public override int GetHashCode()
+ {
+ unchecked
+ {
+ var hashCode = Offset;
+ hashCode = (hashCode * 397) ^ Length;
+ hashCode = (hashCode * 397) ^ (Array != null ? Array.GetHashCode() : 0);
+ return hashCode;
+ }
+ }
+
+ /// <summary>
+ /// A private <see cref="IEnumerator{T}"/> for the <see cref="ActionSegment{T}"/> value type which follows its
+ /// rules of being a view into an underlying <see cref="Array"/>.
+ /// </summary>
+ struct Enumerator : IEnumerator<T>
+ {
+ readonly T[] m_Array;
+ readonly int m_Start;
+ readonly int m_End; // cache Offset + Count, since it's a little slow
+ int m_Current;
+
+ internal Enumerator(ActionSegment<T> arraySegment)
+ {
+ Debug.Assert(arraySegment.Array != null);
+ Debug.Assert(arraySegment.Offset >= 0);
+ Debug.Assert(arraySegment.Length >= 0);
+ Debug.Assert(arraySegment.Offset + arraySegment.Length <= arraySegment.Array.Length);
+
+ m_Array = arraySegment.Array;
+ m_Start = arraySegment.Offset;
+ m_End = arraySegment.Offset + arraySegment.Length;
+ m_Current = arraySegment.Offset - 1;
+ }
+
+ public bool MoveNext()
+ {
+ if (m_Current < m_End)
+ {
+ m_Current++;
+ return m_Current < m_End;
+ }
+ return false;
+ }
+
+ public T Current
+ {
+ get
+ {
+ if (m_Current < m_Start)
+ throw new InvalidOperationException("Enumerator not started.");
+ if (m_Current >= m_End)
+ throw new InvalidOperationException("Enumerator has reached the end already.");
+ return m_Array[m_Current];
+ }
+ }
+
+ object IEnumerator.Current => Current;
+
+ void IEnumerator.Reset()
+ {
+ m_Current = m_Start - 1;
+ }
+
+ public void Dispose()
+ {
+ }
+ }
+ }
+}
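To make the view semantics of `ActionSegment<T>` concrete, here is a small sketch (illustrative values only) of two segments sharing one backing array, so writes through either segment land in the same buffer:

```csharp
using Unity.MLAgents.Actuators;

static class ActionSegmentExample
{
    static void Run()
    {
        // One shared buffer, two non-overlapping views; nothing is copied.
        var buffer = new float[5];
        var steering = new ActionSegment<float>(buffer, 0, 2); // buffer[0..1]
        var weapons = new ActionSegment<float>(buffer, 2, 3);  // buffer[2..4]

        steering[0] = 1.5f;  // writes buffer[0]
        weapons[2] = -1f;    // writes buffer[4]
        weapons.Clear();     // zeroes buffer[2..4] only; steering is untouched
    }
}
```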
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActionSegment.cs.meta b/com.unity.ml-agents/Runtime/Actuators/ActionSegment.cs.meta
new file mode 100644
index 0000000000..8e08ed0a4a
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/ActionSegment.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 4fa1432c1ba3460caaa84303a9011ef2
+timeCreated: 1595869823
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs b/com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs
new file mode 100644
index 0000000000..6b0a001a7d
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs
@@ -0,0 +1,137 @@
+using System;
+using System.Linq;
+using UnityEngine;
+
+namespace Unity.MLAgents.Actuators
+{
+ /// <summary>
+ /// Defines the structure of the actions to be used by the Actuator system.
+ /// </summary>
+ [Serializable]
+ public struct ActionSpec
+ {
+ [SerializeField]
+ int m_NumContinuousActions;
+
+ /// <summary>
+ /// An array of branch sizes for discrete actions.
+ ///
+ /// For an <see cref="IActuator"/> that uses discrete actions, the number of
+ /// branches is the Length of the Array and each index contains the branch size.
+ /// The cumulative sum of the total number of discrete actions can be retrieved
+ /// by the <see cref="SumOfDiscreteBranchSizes"/> property.
+ ///
+ /// For an <see cref="IActuator"/> with a Continuous <see cref="ActionSpec"/>, this will be null.
+ /// </summary>
+ public int[] BranchSizes;
+
+ /// <summary>
+ /// The number of continuous actions that an Agent can take.
+ /// </summary>
+ public int NumContinuousActions { get { return m_NumContinuousActions; } set { m_NumContinuousActions = value; } }
+
+ /// <summary>
+ /// The number of branches for discrete actions that an Agent can take.
+ /// </summary>
+ public int NumDiscreteActions { get { return BranchSizes == null ? 0 : BranchSizes.Length; } }
+
+ /// <summary>
+ /// Get the total number of Discrete Actions that can be taken by calculating the Sum
+ /// of all of the Discrete Action branch sizes.
+ /// </summary>
+ public int SumOfDiscreteBranchSizes { get { return BranchSizes == null ? 0 : BranchSizes.Sum(); } }
+
+ /// <summary>
+ /// Creates a Continuous <see cref="ActionSpec"/> with the number of actions available.
+ /// </summary>
+ /// <param name="numActions">The number of continuous actions available.</param>
+ /// <returns>A Continuous ActionSpec initialized with the number of actions available.</returns>
+ public static ActionSpec MakeContinuous(int numActions)
+ {
+ var actuatorSpace = new ActionSpec(numActions, null);
+ return actuatorSpace;
+ }
+
+ /// <summary>
+ /// Creates a Discrete <see cref="ActionSpec"/> with the array of branch sizes that
+ /// represents the action space.
+ /// </summary>
+ /// <param name="branchSizes">The array of branch sizes for the discrete actions. Each index
+ /// contains the number of actions available for that branch.</param>
+ /// <returns>A Discrete ActionSpec initialized with the array of branch sizes.</returns>
+ public static ActionSpec MakeDiscrete(params int[] branchSizes)
+ {
+ var actuatorSpace = new ActionSpec(0, branchSizes);
+ return actuatorSpace;
+ }
+
+ /// <summary>
+ /// Create an ActionSpec initialized with the specified action sizes.
+ /// </summary>
+ /// <param name="numContinuousActions">The number of continuous actions available.</param>
+ /// <param name="discreteBranchSizes">The array of branch sizes for the discrete actions. Each index
+ /// contains the number of actions available for that branch.</param>
+ public ActionSpec(int numContinuousActions = 0, int[] discreteBranchSizes = null)
+ {
+ m_NumContinuousActions = numContinuousActions;
+ BranchSizes = discreteBranchSizes ?? Array.Empty<int>();
+ }
+
+ /// <summary>
+ /// Check that the ActionSpec uses either all continuous or all discrete actions.
+ /// This is only used when connecting to old versions of the trainer that don't support this.
+ /// </summary>
+ /// <exception cref="UnityAgentsException">Thrown when the ActionSpec mixes continuous and discrete actions.</exception>
+ internal void CheckAllContinuousOrDiscrete()
+ {
+ if (NumContinuousActions > 0 && NumDiscreteActions > 0)
+ {
+ throw new UnityAgentsException(
+ "Action spaces with both continuous and discrete actions are not supported by the trainer. " +
+ "ActionSpecs must be all continuous or all discrete."
+ );
+ }
+ }
+
+ /// <summary>
+ /// Combines a list of action specs and allocates a new array of branch sizes if needed.
+ /// </summary>
+ /// <param name="specs">The list of action specs to combine.</param>
+ /// <returns>An ActionSpec which represents the aggregate of the ActionSpecs passed in.</returns>
+ public static ActionSpec Combine(params ActionSpec[] specs)
+ {
+ var numContinuous = 0;
+ var numDiscrete = 0;
+ for (var i = 0; i < specs.Length; i++)
+ {
+ var spec = specs[i];
+ numContinuous += spec.NumContinuousActions;
+ numDiscrete += spec.NumDiscreteActions;
+ }
+
+ if (numDiscrete <= 0)
+ {
+ return MakeContinuous(numContinuous);
+ }
+
+ var branchSizes = new int[numDiscrete];
+ var offset = 0;
+ for (var i = 0; i < specs.Length; i++)
+ {
+ var spec = specs[i];
+ if (spec.BranchSizes.Length == 0)
+ {
+ continue;
+ }
+ var branchSizesLength = spec.BranchSizes.Length;
+ Array.Copy(spec.BranchSizes,
+ 0,
+ branchSizes,
+ offset,
+ branchSizesLength);
+ offset += branchSizesLength;
+ }
+ return new ActionSpec(numContinuous, branchSizes);
+ }
+ }
+}
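A short sketch of how the factory methods and `Combine` compose; the counts follow directly from the definitions above, and the branch sizes are illustrative:

```csharp
using Unity.MLAgents.Actuators;

static class ActionSpecExample
{
    static void Run()
    {
        var move = ActionSpec.MakeContinuous(2);     // e.g. x/z velocity
        var buttons = ActionSpec.MakeDiscrete(3, 2); // two branches: 3 and 2 options

        var combined = ActionSpec.Combine(move, buttons);
        // combined.NumContinuousActions     == 2
        // combined.NumDiscreteActions       == 2 (branches)
        // combined.SumOfDiscreteBranchSizes == 5
    }
}
```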
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs.meta b/com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs.meta
new file mode 100644
index 0000000000..a442a91a5e
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: ecdd6deefba1416ca149fe09d2a5afd8
+timeCreated: 1595892361
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs b/com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs
new file mode 100644
index 0000000000..af34bef3a3
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs
@@ -0,0 +1,25 @@
+using UnityEngine;
+
+namespace Unity.MLAgents.Actuators
+{
+ /// <summary>
+ /// Editor components for creating Actuators. Generally an IActuator component should
+ /// have a corresponding ActuatorComponent.
+ /// </summary>
+ public abstract class ActuatorComponent : MonoBehaviour
+ {
+ /// <summary>
+ /// Create a collection of <see cref="IActuator"/>s. This is called by the <see cref="Agent"/> during
+ /// initialization.
+ /// </summary>
+ /// <returns>A collection of <see cref="IActuator"/>s.</returns>
+ public abstract IActuator[] CreateActuators();
+
+ /// <summary>
+ /// The specification of the possible actions for this ActuatorComponent.
+ /// This must produce the same results as the corresponding IActuator's ActionSpec.
+ /// </summary>
+ /// <seealso cref="IActuator.ActionSpec"/>
+ public abstract ActionSpec ActionSpec { get; }
+ }
+}
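For illustration, a hypothetical subclass wiring a custom actuator into the Agent. `JumpActuator` is an assumed `IActuator` implementation (a matching sketch appears after `IActuator.cs` below), not an API in this patch:

```csharp
using Unity.MLAgents.Actuators;

// Hypothetical component: attach next to an Agent to add a jump action.
public class JumpActuatorComponent : ActuatorComponent
{
    // The component's spec must match the spec of the actuators it creates.
    static readonly ActionSpec s_Spec = ActionSpec.MakeDiscrete(2); // no-op / jump

    public override ActionSpec ActionSpec => s_Spec;

    public override IActuator[] CreateActuators()
    {
        return new IActuator[] { new JumpActuator(s_Spec) }; // assumed type
    }
}
```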
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs.meta b/com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs.meta
new file mode 100644
index 0000000000..1b7a643ed1
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 77cefae5f6d841be9ff80b41293d271b
+timeCreated: 1593017318
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActuatorDiscreteActionMask.cs b/com.unity.ml-agents/Runtime/Actuators/ActuatorDiscreteActionMask.cs
new file mode 100644
index 0000000000..d44532b16f
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/ActuatorDiscreteActionMask.cs
@@ -0,0 +1,149 @@
+using System;
+using System.Collections.Generic;
+
+namespace Unity.MLAgents.Actuators
+{
+ /// <summary>
+ /// Implementation of IDiscreteActionMask that allows writing to the action mask from an <see cref="IActuator"/>.
+ /// </summary>
+ internal class ActuatorDiscreteActionMask : IDiscreteActionMask
+ {
+ /// When using discrete control, contains the starting indices of the actions
+ /// when all the branches are concatenated with each other.
+ int[] m_StartingActionIndices;
+
+ int[] m_BranchSizes;
+
+ bool[] m_CurrentMask;
+
+ IList<IActuator> m_Actuators;
+
+ readonly int m_SumOfDiscreteBranchSizes;
+ readonly int m_NumBranches;
+
+ /// <summary>
+ /// The offset into the branches array that is used when actuators are writing to the action mask.
+ /// </summary>
+ public int CurrentBranchOffset { get; set; }
+
+ internal ActuatorDiscreteActionMask(IList<IActuator> actuators, int sumOfDiscreteBranchSizes, int numBranches, int[] branchSizes = null)
+ {
+ m_Actuators = actuators;
+ m_SumOfDiscreteBranchSizes = sumOfDiscreteBranchSizes;
+ m_NumBranches = numBranches;
+ m_BranchSizes = branchSizes;
+ }
+
+ /// <inheritdoc/>
+ public void SetActionEnabled(int branch, int actionIndex, bool isEnabled)
+ {
+ LazyInitialize();
+#if DEBUG
+ if (branch >= m_NumBranches || actionIndex >= m_BranchSizes[CurrentBranchOffset + branch])
+ {
+ throw new UnityAgentsException(
+ "Invalid Action Masking: Action Mask is too large for specified branch.");
+ }
+#endif
+ m_CurrentMask[actionIndex + m_StartingActionIndices[CurrentBranchOffset + branch]] = !isEnabled;
+ }
+
+ void LazyInitialize()
+ {
+ if (m_BranchSizes == null)
+ {
+ m_BranchSizes = new int[m_NumBranches];
+ var start = 0;
+ for (var i = 0; i < m_Actuators.Count; i++)
+ {
+ var actuator = m_Actuators[i];
+ var branchSizes = actuator.ActionSpec.BranchSizes;
+ Array.Copy(branchSizes, 0, m_BranchSizes, start, branchSizes.Length);
+ start += branchSizes.Length;
+ }
+ }
+
+ // By default the mask is null. If a mask is needed, allocate it here;
+ // entries default to false, meaning every action starts out allowed.
+ if (m_CurrentMask == null)
+ {
+ m_CurrentMask = new bool[m_SumOfDiscreteBranchSizes];
+ }
+
+ // If this is the first time the masked actions are used, we generate the starting
+ // indices for each branch.
+ if (m_StartingActionIndices == null)
+ {
+ m_StartingActionIndices = Utilities.CumSum(m_BranchSizes);
+ }
+ }
+
+ /// <summary>
+ /// Get the current mask for an agent.
+ /// </summary>
+ /// <returns>A mask for the agent. A boolean array of length equal to the total number of
+ /// actions.</returns>
+ internal bool[] GetMask()
+ {
+#if DEBUG
+ if (m_CurrentMask != null)
+ {
+ AssertMask();
+ }
+#endif
+ return m_CurrentMask;
+ }
+
+ /// <summary>
+ /// Makes sure that the current mask is usable.
+ /// </summary>
+ void AssertMask()
+ {
+#if DEBUG
+ for (var branchIndex = 0; branchIndex < m_NumBranches; branchIndex++)
+ {
+ if (AreAllActionsMasked(branchIndex))
+ {
+ throw new UnityAgentsException(
+ "Invalid Action Masking : All the actions of branch " + branchIndex +
+ " are masked.");
+ }
+ }
+#endif
+ }
+
+ /// <summary>
+ /// Resets the current mask for an agent.
+ /// </summary>
+ internal void ResetMask()
+ {
+ if (m_CurrentMask != null)
+ {
+ Array.Clear(m_CurrentMask, 0, m_CurrentMask.Length);
+ }
+ }
+
+ /// <summary>
+ /// Checks if all the actions in the input branch are masked.
+ /// </summary>
+ /// <param name="branch">The index of the branch to check.</param>
+ /// <returns>True if all the actions of the branch are masked.</returns>
+ bool AreAllActionsMasked(int branch)
+ {
+ if (m_CurrentMask == null)
+ {
+ return false;
+ }
+ var start = m_StartingActionIndices[branch];
+ var end = m_StartingActionIndices[branch + 1];
+ for (var i = start; i < end; i++)
+ {
+ if (!m_CurrentMask[i])
+ {
+ return false;
+ }
+ }
+ return true;
+ }
+ }
+}
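The mask above is stored flat: `Utilities.CumSum` turns the branch sizes into starting indices, and `SetActionEnabled` writes at `startingIndex[branch] + actionIndex`. A standalone sketch of that arithmetic, with assumed branch sizes:

```csharp
static class MaskLayoutExample
{
    static void Run()
    {
        int[] branchSizes = { 3, 2, 4 };              // assumed branches
        var starts = new int[branchSizes.Length + 1]; // equivalent of Utilities.CumSum
        for (var i = 0; i < branchSizes.Length; i++)
        {
            starts[i + 1] = starts[i] + branchSizes[i];
        }
        // starts == [0, 3, 5, 9]; the flat mask length is 9 (SumOfDiscreteBranchSizes).

        var flatMask = new bool[starts[branchSizes.Length]];
        // Masking (branch: 2, actionIndex: 1) flips the entry at 5 + 1 == 6;
        // true means "masked", mirroring m_CurrentMask[...] = !isEnabled above.
        flatMask[starts[2] + 1] = true;
    }
}
```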
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActuatorDiscreteActionMask.cs.meta b/com.unity.ml-agents/Runtime/Actuators/ActuatorDiscreteActionMask.cs.meta
new file mode 100644
index 0000000000..09aa4784b0
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/ActuatorDiscreteActionMask.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: d2a19e2f43fd4637a38d42b2a5f989f3
+timeCreated: 1595459316
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs b/com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs
new file mode 100644
index 0000000000..1ff35557d9
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs
@@ -0,0 +1,500 @@
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using UnityEngine;
+using UnityEngine.Profiling;
+
+namespace Unity.MLAgents.Actuators
+{
+ /// <summary>
+ /// A class that manages the delegation of events, action buffers, and action mask for a list of IActuators.
+ /// </summary>
+ internal class ActuatorManager : IList<IActuator>
+ {
+ // IActuators managed by this object.
+ List m_Actuators;
+
+ // An implementation of IDiscreteActionMask that allows for writing to it based on an offset.
+ ActuatorDiscreteActionMask m_DiscreteActionMask;
+
+ ActionSpec m_CombinedActionSpec;
+
+ /// <summary>
+ /// Flag used to check if our IActuators are ready for execution.
+ /// </summary>
+ /// <seealso cref="ReadyActuatorsForExecution(IList{IActuator}, int, int, int)"/>
+ bool m_ReadyForExecution;
+
+ /// <summary>
+ /// The sum of all of the discrete branches for all of the <see cref="IActuator"/>s in this manager.
+ /// </summary>
+ internal int SumOfDiscreteBranchSizes { get; private set; }
+
+ /// <summary>
+ /// The number of the discrete branches for all of the <see cref="IActuator"/>s in this manager.
+ /// </summary>
+ internal int NumDiscreteActions { get; private set; }
+
+ /// <summary>
+ /// The number of continuous actions for all of the <see cref="IActuator"/>s in this manager.
+ /// </summary>
+ internal int NumContinuousActions { get; private set; }
+
+ /// <summary>
+ /// Returns the total number of actions, calculated as <see cref="NumContinuousActions"/> + <see cref="NumDiscreteActions"/>.
+ /// </summary>
+ public int TotalNumberOfActions => NumContinuousActions + NumDiscreteActions;
+
+ /// <summary>
+ /// Gets the <see cref="ActuatorDiscreteActionMask"/> managed by this object.
+ /// </summary>
+ public ActuatorDiscreteActionMask DiscreteActionMask => m_DiscreteActionMask;
+
+ /// <summary>
+ /// The currently stored <see cref="ActionBuffers"/> object for the <see cref="IActuator"/>s managed by this class.
+ /// </summary>
+ public ActionBuffers StoredActions { get; private set; }
+
+ /// <summary>
+ /// Create an ActuatorList with a preset capacity.
+ /// </summary>
+ /// <param name="capacity">The capacity of the list to create.</param>
+ public ActuatorManager(int capacity = 0)
+ {
+ m_Actuators = new List<IActuator>(capacity);
+ }
+
+ /// <summary>
+ /// <see cref="ReadyActuatorsForExecution(IList{IActuator}, int, int, int)"/>
+ /// </summary>
+ void ReadyActuatorsForExecution()
+ {
+ ReadyActuatorsForExecution(m_Actuators, NumContinuousActions, SumOfDiscreteBranchSizes,
+ NumDiscreteActions);
+ }
+
+ /// <summary>
+ /// This method validates that all <see cref="IActuator"/>s have unique names
+ /// if the `DEBUG` preprocessor macro is defined, and allocates the appropriate buffers to manage the actions for
+ /// all of the <see cref="IActuator"/>s that may live on a particular object.
+ /// </summary>
+ /// <param name="actuators">The list of actuators to validate and allocate buffers for.</param>
+ /// <param name="numContinuousActions">The total number of continuous actions for all of the actuators.</param>
+ /// <param name="sumOfDiscreteBranches">The total sum of the discrete branches for all of the actuators in order
+ /// to be able to allocate an <see cref="IDiscreteActionMask"/>.</param>
+ /// <param name="numDiscreteBranches">The number of discrete branches for all of the actuators.</param>
+ internal void ReadyActuatorsForExecution(IList<IActuator> actuators, int numContinuousActions, int sumOfDiscreteBranches, int numDiscreteBranches)
+ {
+ if (m_ReadyForExecution)
+ {
+ return;
+ }
+#if DEBUG
+ // Make sure the names are actually unique
+ ValidateActuators();
+#endif
+
+ // Sort the Actuators by name to ensure determinism
+ SortActuators(m_Actuators);
+ var continuousActions = numContinuousActions == 0 ? ActionSegment<float>.Empty :
+ new ActionSegment<float>(new float[numContinuousActions]);
+ var discreteActions = numDiscreteBranches == 0 ? ActionSegment<int>.Empty : new ActionSegment<int>(new int[numDiscreteBranches]);
+
+ StoredActions = new ActionBuffers(continuousActions, discreteActions);
+ m_CombinedActionSpec = CombineActionSpecs(actuators);
+ m_DiscreteActionMask = new ActuatorDiscreteActionMask(actuators, sumOfDiscreteBranches, numDiscreteBranches, m_CombinedActionSpec.BranchSizes);
+ m_ReadyForExecution = true;
+ }
+
+ internal static ActionSpec CombineActionSpecs(IList<IActuator> actuators)
+ {
+ int numContinuousActions = 0;
+ int numDiscreteActions = 0;
+
+ foreach (var actuator in actuators)
+ {
+ numContinuousActions += actuator.ActionSpec.NumContinuousActions;
+ numDiscreteActions += actuator.ActionSpec.NumDiscreteActions;
+ }
+
+ int[] combinedBranchSizes;
+ if (numDiscreteActions == 0)
+ {
+ combinedBranchSizes = Array.Empty<int>();
+ }
+ else
+ {
+ combinedBranchSizes = new int[numDiscreteActions];
+ var start = 0;
+ for (var i = 0; i < actuators.Count; i++)
+ {
+ var branchSizes = actuators[i].ActionSpec.BranchSizes;
+ if (branchSizes != null)
+ {
+ Array.Copy(branchSizes, 0, combinedBranchSizes, start, branchSizes.Length);
+ start += branchSizes.Length;
+ }
+ }
+ }
+
+ return new ActionSpec(numContinuousActions, combinedBranchSizes);
+ }
+
+ /// <summary>
+ /// Returns an ActionSpec representing the concatenation of all IActuators' ActionSpecs.
+ /// </summary>
+ /// <returns>The combined <see cref="ActionSpec"/>.</returns>
+ public ActionSpec GetCombinedActionSpec()
+ {
+ ReadyActuatorsForExecution();
+ return m_CombinedActionSpec;
+ }
+
+ /// <summary>
+ /// Updates the local action buffer with the action buffer passed in. If the buffer
+ /// passed in is null, the local action buffer will be cleared.
+ /// </summary>
+ /// <param name="actions">The <see cref="ActionBuffers"/> object which contains all of the
+ /// actions for the IActuators in this list.</param>
+ public void UpdateActions(ActionBuffers actions)
+ {
+ Profiler.BeginSample("ActuatorManager.UpdateActions");
+ ReadyActuatorsForExecution();
+ UpdateActionArray(actions.ContinuousActions, StoredActions.ContinuousActions);
+ UpdateActionArray(actions.DiscreteActions, StoredActions.DiscreteActions);
+ Profiler.EndSample();
+ }
+
+ static void UpdateActionArray<T>(ActionSegment<T> sourceActionBuffer, ActionSegment<T> destination)
+ where T : struct
+ {
+ if (sourceActionBuffer.Length <= 0)
+ {
+ destination.Clear();
+ }
+ else
+ {
+ if (sourceActionBuffer.Length != destination.Length)
+ {
+ Debug.AssertFormat(sourceActionBuffer.Length == destination.Length,
+ "sourceActionBuffer: {0} is a different size than destination: {1}.",
+ sourceActionBuffer.Length,
+ destination.Length);
+ }
+
+ Array.Copy(sourceActionBuffer.Array,
+ sourceActionBuffer.Offset,
+ destination.Array,
+ destination.Offset,
+ destination.Length);
+ }
+ }
+
+ /// <summary>
+ /// This method will trigger the writing to the <see cref="IDiscreteActionMask"/> by all of the actuators
+ /// managed by this object.
+ /// </summary>
+ public void WriteActionMask()
+ {
+ ReadyActuatorsForExecution();
+ m_DiscreteActionMask.ResetMask();
+ var offset = 0;
+ for (var i = 0; i < m_Actuators.Count; i++)
+ {
+ var actuator = m_Actuators[i];
+ if (actuator.ActionSpec.NumDiscreteActions > 0)
+ {
+ m_DiscreteActionMask.CurrentBranchOffset = offset;
+ actuator.WriteDiscreteActionMask(m_DiscreteActionMask);
+ offset += actuator.ActionSpec.NumDiscreteActions;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Iterates through all of the IActuators in this list and calls their
+ /// <see cref="IHeuristicProvider.Heuristic"/> method on them, if implemented, with the appropriate
+ /// <see cref="ActionSegment{T}"/>s depending on their <see cref="ActionSpec"/>.
+ /// </summary>
+ public void ApplyHeuristic(in ActionBuffers actionBuffersOut)
+ {
+ Profiler.BeginSample("ActuatorManager.ApplyHeuristic");
+ var continuousStart = 0;
+ var discreteStart = 0;
+ for (var i = 0; i < m_Actuators.Count; i++)
+ {
+ var actuator = m_Actuators[i];
+ var numContinuousActions = actuator.ActionSpec.NumContinuousActions;
+ var numDiscreteActions = actuator.ActionSpec.NumDiscreteActions;
+
+ if (numContinuousActions == 0 && numDiscreteActions == 0)
+ {
+ continue;
+ }
+
+ var continuousActions = ActionSegment<float>.Empty;
+ if (numContinuousActions > 0)
+ {
+ continuousActions = new ActionSegment<float>(actionBuffersOut.ContinuousActions.Array,
+ continuousStart,
+ numContinuousActions);
+ }
+
+ var discreteActions = ActionSegment<int>.Empty;
+ if (numDiscreteActions > 0)
+ {
+ discreteActions = new ActionSegment<int>(actionBuffersOut.DiscreteActions.Array,
+ discreteStart,
+ numDiscreteActions);
+ }
+ actuator.Heuristic(new ActionBuffers(continuousActions, discreteActions));
+ continuousStart += numContinuousActions;
+ discreteStart += numDiscreteActions;
+ }
+ Profiler.EndSample();
+ }
+
+ /// <summary>
+ /// Iterates through all of the IActuators in this list and calls their
+ /// <see cref="IActionReceiver.OnActionReceived"/> method on them with the appropriate
+ /// <see cref="ActionSegment{T}"/>s depending on their <see cref="ActionSpec"/>.
+ /// </summary>
+ public void ExecuteActions()
+ {
+ Profiler.BeginSample("ActuatorManager.ExecuteActions");
+ ReadyActuatorsForExecution();
+ var continuousStart = 0;
+ var discreteStart = 0;
+ for (var i = 0; i < m_Actuators.Count; i++)
+ {
+ var actuator = m_Actuators[i];
+ var numContinuousActions = actuator.ActionSpec.NumContinuousActions;
+ var numDiscreteActions = actuator.ActionSpec.NumDiscreteActions;
+
+ if (numContinuousActions == 0 && numDiscreteActions == 0)
+ {
+ continue;
+ }
+
+ var continuousActions = ActionSegment<float>.Empty;
+ if (numContinuousActions > 0)
+ {
+ continuousActions = new ActionSegment<float>(StoredActions.ContinuousActions.Array,
+ continuousStart,
+ numContinuousActions);
+ }
+
+ var discreteActions = ActionSegment<int>.Empty;
+ if (numDiscreteActions > 0)
+ {
+ discreteActions = new ActionSegment<int>(StoredActions.DiscreteActions.Array,
+ discreteStart,
+ numDiscreteActions);
+ }
+
+ actuator.OnActionReceived(new ActionBuffers(continuousActions, discreteActions));
+ continuousStart += numContinuousActions;
+ discreteStart += numDiscreteActions;
+ }
+ Profiler.EndSample();
+ }
+
+ /// <summary>
+ /// Resets the <see cref="StoredActions"/> to be all
+ /// zeros and calls <see cref="IActuator.ResetData"/> on each <see cref="IActuator"/> managed by this object.
+ /// </summary>
+ public void ResetData()
+ {
+ if (!m_ReadyForExecution)
+ {
+ return;
+ }
+ StoredActions.Clear();
+ for (var i = 0; i < m_Actuators.Count; i++)
+ {
+ m_Actuators[i].ResetData();
+ }
+ m_DiscreteActionMask.ResetMask();
+ }
+
+ /// <summary>
+ /// Sorts the <see cref="IActuator"/>s according to their <see cref="IActuator.Name"/> value.
+ /// </summary>
+ internal static void SortActuators(List actuators)
+ {
+ actuators.Sort((x, y) => string.Compare(x.Name, y.Name, StringComparison.InvariantCulture));
+ }
+
+ /// <summary>
+ /// Validates that the IActuators managed by this object have unique names.
+ /// Each Actuator needs to have a unique name in order for this object to ensure that the storage of action
+ /// buffers, and execution of Actuators remains deterministic across different sessions of running.
+ /// </summary>
+ void ValidateActuators()
+ {
+ for (var i = 0; i < m_Actuators.Count - 1; i++)
+ {
+ Debug.Assert(
+ !m_Actuators[i].Name.Equals(m_Actuators[i + 1].Name),
+ "Actuator names must be unique.");
+ }
+ }
+
+ /// <summary>
+ /// Helper method to update bookkeeping items around buffer management for actuators added to this object.
+ /// </summary>
+ /// <param name="actuatorItem">The IActuator to keep bookkeeping for.</param>
+ void AddToBufferSizes(IActuator actuatorItem)
+ {
+ if (actuatorItem == null)
+ {
+ return;
+ }
+
+ NumContinuousActions += actuatorItem.ActionSpec.NumContinuousActions;
+ NumDiscreteActions += actuatorItem.ActionSpec.NumDiscreteActions;
+ SumOfDiscreteBranchSizes += actuatorItem.ActionSpec.SumOfDiscreteBranchSizes;
+ }
+
+ /// <summary>
+ /// Helper method to update bookkeeping items around buffer management for actuators removed from this object.
+ /// </summary>
+ /// <param name="actuatorItem">The IActuator to keep bookkeeping for.</param>
+ void SubtractFromBufferSize(IActuator actuatorItem)
+ {
+ if (actuatorItem == null)
+ {
+ return;
+ }
+
+ NumContinuousActions -= actuatorItem.ActionSpec.NumContinuousActions;
+ NumDiscreteActions -= actuatorItem.ActionSpec.NumDiscreteActions;
+ SumOfDiscreteBranchSizes -= actuatorItem.ActionSpec.SumOfDiscreteBranchSizes;
+ }
+
+ /// <summary>
+ /// Sets all of the bookkeeping items back to 0.
+ /// </summary>
+ void ClearBufferSizes()
+ {
+ NumContinuousActions = NumDiscreteActions = SumOfDiscreteBranchSizes = 0;
+ }
+
+ /// <summary>
+ /// Add an array of <see cref="IActuator"/>s at once.
+ /// </summary>
+ /// <param name="actuators">The array of <see cref="IActuator"/>s to add.</param>
+ public void AddActuators(IActuator[] actuators)
+ {
+ for (var i = 0; i < actuators.Length; i++)
+ {
+ Add(actuators[i]);
+ }
+ }
+
+ /*********************************************************************************
+ * IList implementation that delegates to m_Actuators List. *
+ *********************************************************************************/
+
+ /// <inheritdoc/>
+ public IEnumerator<IActuator> GetEnumerator()
+ {
+ return m_Actuators.GetEnumerator();
+ }
+
+ /// <inheritdoc/>
+ IEnumerator IEnumerable.GetEnumerator()
+ {
+ return ((IEnumerable)m_Actuators).GetEnumerator();
+ }
+
+ /// <inheritdoc/>
+ public void Add(IActuator item)
+ {
+ Debug.Assert(m_ReadyForExecution == false,
+ "Cannot add to the ActuatorManager after its buffers have been initialized");
+ m_Actuators.Add(item);
+ AddToBufferSizes(item);
+ }
+
+ /// <inheritdoc/>
+ public void Clear()
+ {
+ Debug.Assert(m_ReadyForExecution == false,
+ "Cannot clear the ActuatorManager after its buffers have been initialized");
+ m_Actuators.Clear();
+ ClearBufferSizes();
+ }
+
+ /// <inheritdoc/>
+ public bool Contains(IActuator item)
+ {
+ return m_Actuators.Contains(item);
+ }
+
+ /// <inheritdoc/>
+ public void CopyTo(IActuator[] array, int arrayIndex)
+ {
+ m_Actuators.CopyTo(array, arrayIndex);
+ }
+
+ /// <inheritdoc/>
+ public bool Remove(IActuator item)
+ {
+ Debug.Assert(m_ReadyForExecution == false,
+ "Cannot remove from the ActuatorManager after its buffers have been initialized");
+ if (m_Actuators.Remove(item))
+ {
+ SubtractFromBufferSize(item);
+ return true;
+ }
+ return false;
+ }
+
+ /// <inheritdoc/>
+ public int Count => m_Actuators.Count;
+
+ /// <inheritdoc/>
+ public bool IsReadOnly => false;
+
+ /// <inheritdoc/>
+ public int IndexOf(IActuator item)
+ {
+ return m_Actuators.IndexOf(item);
+ }
+
+ /// <inheritdoc/>
+ public void Insert(int index, IActuator item)
+ {
+ Debug.Assert(m_ReadyForExecution == false,
+ "Cannot insert into the ActuatorManager after its buffers have been initialized");
+ m_Actuators.Insert(index, item);
+ AddToBufferSizes(item);
+ }
+
+ /// <inheritdoc/>
+ public void RemoveAt(int index)
+ {
+ Debug.Assert(m_ReadyForExecution == false,
+ "Cannot remove from the ActuatorManager after its buffers have been initialized");
+ var actuator = m_Actuators[index];
+ SubtractFromBufferSize(actuator);
+ m_Actuators.RemoveAt(index);
+ }
+
+ /// <inheritdoc/>
+ public IActuator this[int index]
+ {
+ get => m_Actuators[index];
+ set
+ {
+ Debug.Assert(m_ReadyForExecution == false,
+ "Cannot modify the ActuatorManager after its buffers have been initialized");
+ var old = m_Actuators[index];
+ SubtractFromBufferSize(old);
+ m_Actuators[index] = value;
+ AddToBufferSizes(value);
+ }
+ }
+ }
+}
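A sketch of the lifecycle this class drives for the Agent. The manager is internal, so this mirrors internal or test usage; `MoveActuator` and `JumpActuator` are assumed `IActuator` implementations:

```csharp
using Unity.MLAgents.Actuators;

static class ActuatorManagerExample
{
    static void Run()
    {
        var manager = new ActuatorManager();
        manager.Add(new MoveActuator(ActionSpec.MakeContinuous(2))); // assumed type
        manager.Add(new JumpActuator(ActionSpec.MakeDiscrete(2)));   // assumed type

        // First use allocates buffers and sorts actuators by Name for determinism.
        var combinedSpec = manager.GetCombinedActionSpec();

        // Copy a policy's output into the stored buffers, then hand each
        // actuator its own ActionSegment via OnActionReceived.
        manager.UpdateActions(new ActionBuffers(
            new[] { 0.1f, -0.4f }, // continuous: MoveActuator's two slots
            new[] { 1 }));         // discrete: JumpActuator's single branch
        manager.ExecuteActions();
    }
}
```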
diff --git a/com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs.meta b/com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs.meta
new file mode 100644
index 0000000000..aa56b5ca9f
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 7bb5b1e3779d4342a8e70f6e3c1d67cc
+timeCreated: 1593031463
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs b/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs
new file mode 100644
index 0000000000..1586e6215b
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs
@@ -0,0 +1,192 @@
+using System;
+using System.Linq;
+using UnityEngine;
+
+namespace Unity.MLAgents.Actuators
+{
+ /// <summary>
+ /// A structure that wraps the <see cref="ActionSegment{T}"/>s for a particular <see cref="IActionReceiver"/> and is
+ /// used when <see cref="IActionReceiver.OnActionReceived"/> is called.
+ /// </summary>
+ public readonly struct ActionBuffers
+ {
+ /// <summary>
+ /// An empty action buffer.
+ /// </summary>
+ public static ActionBuffers Empty = new ActionBuffers(ActionSegment<float>.Empty, ActionSegment<int>.Empty);
+
+ /// <summary>
+ /// Holds the Continuous <see cref="ActionSegment{T}"/> to be used by an <see cref="IActionReceiver"/>.
+ /// </summary>
+ public ActionSegment<float> ContinuousActions { get; }
+
+ /// <summary>
+ /// Holds the Discrete <see cref="ActionSegment{T}"/> to be used by an <see cref="IActionReceiver"/>.
+ /// </summary>
+ public ActionSegment<int> DiscreteActions { get; }
+
+ /// <summary>
+ /// Create an <see cref="ActionBuffers"/> instance with discrete actions stored as a float array. This exists
+ /// to achieve backward compatibility with the former Agent methods which used a float array for both continuous
+ /// and discrete actions.
+ /// </summary>
+ /// <param name="discreteActions">The float array of discrete actions.</param>
+ /// <returns>An <see cref="ActionBuffers"/> instance initialized with a <see cref="DiscreteActions"/> segment
+ /// initialized from a float array.</returns>
+ public static ActionBuffers FromDiscreteActions(float[] discreteActions)
+ {
+ return new ActionBuffers(ActionSegment<float>.Empty, discreteActions == null ? ActionSegment<int>.Empty
+ : new ActionSegment<int>(Array.ConvertAll(discreteActions,
+ x => (int)x)));
+ }
+
+ /// <summary>
+ /// Construct an <see cref="ActionBuffers"/> instance with the continuous and discrete actions that will
+ /// be used.
+ /// </summary>
+ /// <param name="continuousActions">The continuous actions to send to an <see cref="IActionReceiver"/>.</param>
+ /// <param name="discreteActions">The discrete actions to send to an <see cref="IActionReceiver"/>.</param>
+ public ActionBuffers(float[] continuousActions, int[] discreteActions)
+ : this(new ActionSegment<float>(continuousActions), new ActionSegment<int>(discreteActions)) { }
+
+ /// <summary>
+ /// Construct an <see cref="ActionBuffers"/> instance with the continuous and discrete actions that will
+ /// be used.
+ /// </summary>
+ /// <param name="continuousActions">The continuous actions to send to an <see cref="IActionReceiver"/>.</param>
+ /// <param name="discreteActions">The discrete actions to send to an <see cref="IActionReceiver"/>.</param>
+ public ActionBuffers(ActionSegment<float> continuousActions, ActionSegment<int> discreteActions)
+ {
+ ContinuousActions = continuousActions;
+ DiscreteActions = discreteActions;
+ }
+
+ /// <summary>
+ /// Construct an <see cref="ActionBuffers"/> instance with an <see cref="ActionSpec"/>. All values are initialized to zeros.
+ /// </summary>
+ /// <param name="actionSpec">The <see cref="ActionSpec"/> to send to an <see cref="IActionReceiver"/>.</param>
+ public ActionBuffers(ActionSpec actionSpec)
+ : this(new ActionSegment<float>(new float[actionSpec.NumContinuousActions]),
+ new ActionSegment<int>(new int[actionSpec.NumDiscreteActions]))
+ { }
+
+ /// <summary>
+ /// Create an <see cref="ActionBuffers"/> instance with ActionSpec and all actions stored as a float array.
+ /// </summary>
+ /// <param name="actionSpec"><see cref="ActionSpec"/> of the <see cref="ActionBuffers"/>.</param>
+ /// <param name="actions">The float array of all actions, including discrete and continuous actions.</param>
+ /// <returns>An <see cref="ActionBuffers"/> instance initialized with an <see cref="ActionSpec"/> and a float array.</returns>
+ internal static ActionBuffers FromActionSpec(ActionSpec actionSpec, float[] actions)
+ {
+ if (actions == null)
+ {
+ return ActionBuffers.Empty;
+ }
+
+ Debug.Assert(actions.Length == actionSpec.NumContinuousActions + actionSpec.NumDiscreteActions,
+ $"The length of '{nameof(actions)}' does not match the total size of ActionSpec.\n" +
+ $"{nameof(actions)}.Length: {actions.Length}\n" +
+ $"{nameof(actionSpec)}: {actionSpec.NumContinuousActions + actionSpec.NumDiscreteActions}");
+
+ ActionSegment<float> continuousActionSegment = ActionSegment<float>.Empty;
+ ActionSegment<int> discreteActionSegment = ActionSegment<int>.Empty;
+ int offset = 0;
+ if (actionSpec.NumContinuousActions > 0)
+ {
+ continuousActionSegment = new ActionSegment<float>(actions, 0, actionSpec.NumContinuousActions);
+ offset += actionSpec.NumContinuousActions;
+ }
+ if (actionSpec.NumDiscreteActions > 0)
+ {
+ int[] discreteActions = new int[actionSpec.NumDiscreteActions];
+ for (var i = 0; i < actionSpec.NumDiscreteActions; i++)
+ {
+ discreteActions[i] = (int)actions[i + offset];
+ }
+ discreteActionSegment = new ActionSegment<int>(discreteActions);
+ }
+
+ return new ActionBuffers(continuousActionSegment, discreteActionSegment);
+ }
+
+ /// <summary>
+ /// Clear the <see cref="ContinuousActions"/> and <see cref="DiscreteActions"/> segments to be all zeros.
+ /// </summary>
+ public void Clear()
+ {
+ ContinuousActions.Clear();
+ DiscreteActions.Clear();
+ }
+
+ /// <summary>
+ /// Check if the <see cref="ActionBuffers"/> is empty.
+ /// </summary>
+ /// <returns>Whether the buffers are empty.</returns>
+ public bool IsEmpty()
+ {
+ return ContinuousActions.IsEmpty() && DiscreteActions.IsEmpty();
+ }
+
+ /// <summary>
+ /// Indicates whether the current ActionBuffers is equal to another ActionBuffers.
+ /// </summary>
+ /// <param name="obj">An ActionBuffers to compare with this ActionBuffers.</param>
+ /// <returns>true if the current ActionBuffers is equal to the other parameter; otherwise, false.</returns>
+ public override bool Equals(object obj)
+ {
+ if (!(obj is ActionBuffers))
+ {
+ return false;
+ }
+
+ var ab = (ActionBuffers)obj;
+ return ab.ContinuousActions.SequenceEqual(ContinuousActions) &&
+ ab.DiscreteActions.SequenceEqual(DiscreteActions);
+ }
+
+ /// <summary>
+ /// Computes the hash code of the ActionBuffers.
+ /// </summary>
+ /// <returns>A hash code for the current ActionBuffers.</returns>
+ public override int GetHashCode()
+ {
+ unchecked
+ {
+ return (ContinuousActions.GetHashCode() * 397) ^ DiscreteActions.GetHashCode();
+ }
+ }
+ }
+
+ /// <summary>
+ /// An interface that describes an object that can receive actions from a Reinforcement Learning network.
+ /// </summary>
+ public interface IActionReceiver
+ {
+ /// <summary>
+ /// Method called in order to allow objects to execute actions based on the
+ /// <see cref="ActionBuffers"/> contents. The structure of the contents in the <see cref="ActionBuffers"/>
+ /// is defined by the <see cref="ActionSpec"/>.
+ /// </summary>
+ /// <param name="actionBuffers">The data structure containing the action buffers for this object.</param>
+ void OnActionReceived(ActionBuffers actionBuffers);
+
+ /// <summary>
+ /// Implement `WriteDiscreteActionMask()` to modify the masks for discrete
+ /// actions. When using discrete actions, the agent will not perform the masked
+ /// action.
+ /// </summary>
+ /// <param name="actionMask">
+ /// The action mask for the agent.
+ /// </param>
+ /// <remarks>
+ /// When using Discrete Control, you can prevent the Agent from using a certain
+ /// action by masking it with <see cref="IDiscreteActionMask.SetActionEnabled"/>.
+ ///
+ /// See [Agents - Actions] for more information on masking actions.
+ ///
+ /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Learning-Environment-Design-Agents.md#actions
+ /// </remarks>
+ void WriteDiscreteActionMask(IDiscreteActionMask actionMask);
+ }
+}
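A hedged sketch of a receiver reading its segments. The slot layout (two continuous slots, one discrete branch) is an assumption and depends on the ActionSpec that produced the buffers:

```csharp
using Unity.MLAgents.Actuators;

// Hypothetical receiver; not part of this patch.
public class CarController : IActionReceiver
{
    public void OnActionReceived(ActionBuffers actionBuffers)
    {
        float steer = actionBuffers.ContinuousActions[0];
        float throttle = actionBuffers.ContinuousActions[1];
        int gear = actionBuffers.DiscreteActions[0];
        // ... apply steer/throttle/gear to the vehicle here ...
    }

    public void WriteDiscreteActionMask(IDiscreteActionMask actionMask)
    {
        // e.g. disallow reverse (option 0 of branch 0) for the next decision.
        actionMask.SetActionEnabled(0, 0, false);
    }
}
```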
diff --git a/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs.meta b/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs.meta
new file mode 100644
index 0000000000..b14a69d21c
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: b25a5b3027c9476ea1a310241be0f10f
+timeCreated: 1594756775
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Actuators/IActuator.cs b/com.unity.ml-agents/Runtime/Actuators/IActuator.cs
new file mode 100644
index 0000000000..aa2675905a
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/IActuator.cs
@@ -0,0 +1,42 @@
+namespace Unity.MLAgents.Actuators
+{
+ /// <summary>
+ /// Abstraction that facilitates the execution of actions.
+ /// </summary>
+ public interface IActuator : IActionReceiver, IHeuristicProvider
+ {
+ /// <summary>
+ /// The specification of the actions for this IActuator.
+ /// </summary>
+ ActionSpec ActionSpec { get; }
+
+ /// <summary>
+ /// Gets the name of this IActuator which will be used to sort it.
+ /// </summary>
+ string Name { get; }
+
+ /// <summary>
+ /// Resets the internal state of the actuator. This is called at the end of an Agent's episode.
+ /// Most implementations can leave this empty.
+ /// </summary>
+ void ResetData();
+ }
+
+ /// <summary>
+ /// Helper methods to be shared by all classes that implement <see cref="IActuator"/>.
+ /// </summary>
+ public static class IActuatorExtensions
+ {
+ /// <summary>
+ /// Returns the number of discrete branches + the number of continuous actions.
+ /// </summary>
+ /// <param name="actuator">The actuator to count actions for.</param>
+ /// <returns>The number of discrete branches + the number of continuous actions.</returns>
+ public static int TotalNumberOfActions(this IActuator actuator)
+ {
+ return actuator.ActionSpec.NumContinuousActions + actuator.ActionSpec.NumDiscreteActions;
+ }
+ }
+}
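A minimal hedged sketch of a custom actuator with one discrete branch; the jump semantics and the type itself (referenced in the earlier component and manager sketches) are assumptions, not part of this patch:

```csharp
using Unity.MLAgents.Actuators;

// Hypothetical actuator: branch 0 has two options (0 = no-op, 1 = jump).
public class JumpActuator : IActuator
{
    public JumpActuator(ActionSpec spec) { ActionSpec = spec; }

    public ActionSpec ActionSpec { get; }

    public string Name => "JumpActuator"; // must be unique per Agent

    public void OnActionReceived(ActionBuffers actionBuffers)
    {
        bool jump = actionBuffers.DiscreteActions[0] == 1;
        // ... trigger the jump in game code when `jump` is true ...
    }

    public void WriteDiscreteActionMask(IDiscreteActionMask actionMask) { }

    public void Heuristic(in ActionBuffers actionBuffersOut)
    {
        actionBuffersOut.DiscreteActions[0] = 0; // default: don't jump
    }

    public void ResetData() { }
}
```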
diff --git a/com.unity.ml-agents/Runtime/Actuators/IActuator.cs.meta b/com.unity.ml-agents/Runtime/Actuators/IActuator.cs.meta
new file mode 100644
index 0000000000..4fd0d172ca
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/IActuator.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 780d7f0a675f44bfa784b370025b51c3
+timeCreated: 1592848317
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Actuators/IBuiltInActuator.cs b/com.unity.ml-agents/Runtime/Actuators/IBuiltInActuator.cs
new file mode 100644
index 0000000000..8b77672d17
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/IBuiltInActuator.cs
@@ -0,0 +1,49 @@
+namespace Unity.MLAgents.Actuators
+{
+ /// <summary>
+ /// Identifiers for "built in" actuator types.
+ /// These are only used for analytics, and should not be used for any runtime decisions.
+ ///
+ /// NOTE: Do not renumber these, since the values are used for analytics. Renaming is allowed though.
+ /// </summary>
+ public enum BuiltInActuatorType
+ {
+ /// <summary>
+ /// Default actuator type if it cannot be determined.
+ /// </summary>
+ Unknown = 0,
+
+ /// <summary>
+ /// VectorActuator used by the Agent.
+ /// </summary>
+ AgentVectorActuator = 1,
+
+ /// <summary>
+ /// Corresponds to <see cref="VectorActuator"/>.
+ /// </summary>
+ VectorActuator = 2,
+
+ /// <summary>
+ /// Corresponds to the Match3Actuator in com.unity.ml-agents.extensions.
+ /// </summary>
+ Match3Actuator = 3,
+
+ /// <summary>
+ /// Corresponds to the InputActionActuator in com.unity.ml-agents.extensions.
+ /// </summary>
+ InputActionActuator = 4,
+ }
+
+ /// <summary>
+ /// Interface for actuators that are provided as part of ML-Agents.
+ /// User-implemented actuators don't need to use this interface.
+ /// </summary>
+ internal interface IBuiltInActuator
+ {
+ /// <summary>
+ /// Return the corresponding BuiltInActuatorType for the actuator.
+ /// </summary>
+ /// <returns>A BuiltInActuatorType corresponding to the actuator.</returns>
+ BuiltInActuatorType GetBuiltInActuatorType();
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Actuators/IBuiltInActuator.cs.meta b/com.unity.ml-agents/Runtime/Actuators/IBuiltInActuator.cs.meta
new file mode 100644
index 0000000000..da1d96f271
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/IBuiltInActuator.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: e3d7ef9a9a5043549cc5c0bbee520810
+timeCreated: 1613514041
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Actuators/IDiscreteActionMask.cs b/com.unity.ml-agents/Runtime/Actuators/IDiscreteActionMask.cs
new file mode 100644
index 0000000000..1a100b68e1
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/IDiscreteActionMask.cs
@@ -0,0 +1,26 @@
+namespace Unity.MLAgents.Actuators
+{
+ /// <summary>
+ /// Interface for writing a mask to disable discrete actions for agents for the next decision.
+ /// </summary>
+ public interface IDiscreteActionMask
+ {
+ ///
+ /// Set whether or not the action index for the given branch is allowed.
+ ///
+ ///
+ /// By default, all discrete actions are allowed.
+ /// If isEnabled is false, the agent will not be able to perform the action passed as an argument
+ /// at the next decision for the specified action branch. The actionIndex corresponds
+ /// to the action option the agent will be unable to perform.
+ ///
+ /// See [Agents - Actions] for more information on masking actions.
+ ///
+ /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Learning-Environment-Design-Agents.md#masking-discrete-actions
+ ///
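+ ///
+ /// As a hedged sketch, an Agent override might disable a single action option
+ /// (the branch and index values below are arbitrary examples):
+ ///
+ /// public override void WriteDiscreteActionMask(IDiscreteActionMask actionMask)
+ /// {
+ ///     // Prevent action index 2 on branch 0 at the next decision.
+ ///     actionMask.SetActionEnabled(0, 2, false);
+ /// }
+ ///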
+ /// The branch for which the actions will be masked.
+ /// Index of the action.
+ /// Whether the action is allowed or not.
+ void SetActionEnabled(int branch, int actionIndex, bool isEnabled);
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Actuators/IDiscreteActionMask.cs.meta b/com.unity.ml-agents/Runtime/Actuators/IDiscreteActionMask.cs.meta
new file mode 100644
index 0000000000..ebfa10158f
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/IDiscreteActionMask.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 1bc4e4b71bf4470789488fab2ee65388
+timeCreated: 1595369065
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Actuators/IHeuristicProvider.cs b/com.unity.ml-agents/Runtime/Actuators/IHeuristicProvider.cs
new file mode 100644
index 0000000000..b992361c83
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/IHeuristicProvider.cs
@@ -0,0 +1,18 @@
+namespace Unity.MLAgents.Actuators
+{
+ ///
+ /// Interface that allows objects to fill out an ActionBuffers data structure for
+ /// controlling the behavior of Agents or Actuators.
+ ///
+ public interface IHeuristicProvider
+ {
+ ///
+ /// Method called on objects which are expected to fill out the ActionBuffers data structure.
+ /// Objects that implement this interface should be consistent in the placement of their actions
+ /// in the data structure.
+ ///
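+ ///
+ /// A minimal sketch of an implementation (the input axis is an assumption for
+ /// illustration; indices must match those read back in OnActionReceived):
+ ///
+ /// public void Heuristic(in ActionBuffers actionBuffersOut)
+ /// {
+ ///     var continuousActions = actionBuffersOut.ContinuousActions;
+ ///     continuousActions[0] = Input.GetAxis("Horizontal");
+ /// }
+ ///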
+ /// The ActionBuffers data structure to be filled by the
+ /// object implementing this interface.
+ void Heuristic(in ActionBuffers actionBuffersOut);
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Actuators/IHeuristicProvider.cs.meta b/com.unity.ml-agents/Runtime/Actuators/IHeuristicProvider.cs.meta
new file mode 100644
index 0000000000..ca8338a072
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/IHeuristicProvider.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: be90ffb28f39444a8fb02dfd4a82870c
+timeCreated: 1610057456
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Actuators/VectorActuator.cs b/com.unity.ml-agents/Runtime/Actuators/VectorActuator.cs
new file mode 100644
index 0000000000..586058aad3
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/VectorActuator.cs
@@ -0,0 +1,105 @@
+using UnityEngine.Profiling;
+
+namespace Unity.MLAgents.Actuators
+{
+ ///
+ /// IActuator implementation that forwards calls to an IActionReceiver and an IHeuristicProvider.
+ ///
+ internal class VectorActuator : IActuator, IBuiltInActuator
+ {
+ IActionReceiver m_ActionReceiver;
+ IHeuristicProvider m_HeuristicProvider;
+
+ ActionBuffers m_ActionBuffers;
+ internal ActionBuffers ActionBuffers
+ {
+ get => m_ActionBuffers;
+ private set => m_ActionBuffers = value;
+ }
+
+ ///
+ /// Create a VectorActuator that forwards to the provided IActionReceiver.
+ ///
+ /// The IActionReceiver used for OnActionReceived and WriteDiscreteActionMask.
+ /// If this parameter also implements IHeuristicProvider, it will be cast and used to forward calls to
+ /// Heuristic.
+ ///
+ ///
+ public VectorActuator(IActionReceiver actionReceiver,
+ ActionSpec actionSpec,
+ string name = "VectorActuator")
+ : this(actionReceiver, actionReceiver as IHeuristicProvider, actionSpec, name) { }
+
+ ///
+ /// Create a VectorActuator that forwards to the provided IActionReceiver.
+ ///
+ /// The IActionReceiver used for OnActionReceived and WriteDiscreteActionMask.
+ /// The IHeuristicProvider used to fill the ActionBuffers
+ /// for Heuristic Policies.
+ ///
+ ///
+ public VectorActuator(IActionReceiver actionReceiver,
+ IHeuristicProvider heuristicProvider,
+ ActionSpec actionSpec,
+ string name = "VectorActuator")
+ {
+ m_ActionReceiver = actionReceiver;
+ m_HeuristicProvider = heuristicProvider;
+ ActionSpec = actionSpec;
+ string suffix;
+ if (actionSpec.NumContinuousActions == 0)
+ {
+ suffix = "-Discrete";
+ }
+ else if (actionSpec.NumDiscreteActions == 0)
+ {
+ suffix = "-Continuous";
+ }
+ else
+ {
+ suffix = $"-Continuous-{actionSpec.NumContinuousActions}-Discrete-{actionSpec.NumDiscreteActions}";
+ }
+ Name = name + suffix;
+ }
+
+ ///
+ public void ResetData()
+ {
+ m_ActionBuffers = ActionBuffers.Empty;
+ }
+
+ ///
+ public void OnActionReceived(ActionBuffers actionBuffers)
+ {
+ Profiler.BeginSample("VectorActuator.OnActionReceived");
+ m_ActionBuffers = actionBuffers;
+ m_ActionReceiver.OnActionReceived(m_ActionBuffers);
+ Profiler.EndSample();
+ }
+
+ public void Heuristic(in ActionBuffers actionBuffersOut)
+ {
+ Profiler.BeginSample("VectorActuator.Heuristic");
+ m_HeuristicProvider?.Heuristic(actionBuffersOut);
+ Profiler.EndSample();
+ }
+
+ ///
+ public void WriteDiscreteActionMask(IDiscreteActionMask actionMask)
+ {
+ m_ActionReceiver.WriteDiscreteActionMask(actionMask);
+ }
+
+ ///
+ public ActionSpec ActionSpec { get; }
+
+ ///
+ public string Name { get; }
+
+ ///
+ public virtual BuiltInActuatorType GetBuiltInActuatorType()
+ {
+ return BuiltInActuatorType.VectorActuator;
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Actuators/VectorActuator.cs.meta b/com.unity.ml-agents/Runtime/Actuators/VectorActuator.cs.meta
new file mode 100644
index 0000000000..6e9f68b913
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Actuators/VectorActuator.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: ff7a3292c0b24b23b3f1c0eeb690ec4c
+timeCreated: 1593023833
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Agent.cs b/com.unity.ml-agents/Runtime/Agent.cs
new file mode 100644
index 0000000000..8d5c6e79d2
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Agent.cs
@@ -0,0 +1,1427 @@
+using System;
+using System.Collections.Generic;
+using System.Collections.ObjectModel;
+using UnityEngine;
+using Unity.Barracuda;
+using Unity.MLAgents.Actuators;
+using Unity.MLAgents.Sensors;
+using Unity.MLAgents.Sensors.Reflection;
+using Unity.MLAgents.Demonstrations;
+using Unity.MLAgents.Policies;
+using UnityEngine.Serialization;
+
+namespace Unity.MLAgents
+{
+ ///
+ /// Struct that contains all the information for an Agent, including its
+ /// observations, actions and current status.
+ ///
+ public struct AgentInfo
+ {
+ ///
+ /// Keeps track of the last actions taken by the Brain.
+ ///
+ public ActionBuffers storedActions;
+
+ ///
+ /// For discrete control, specifies the actions that the agent cannot take.
+ /// An element of the mask array is true if the action is prohibited.
+ ///
+ public bool[] discreteActionMasks;
+
+ ///
+ /// The current agent reward.
+ ///
+ public float reward;
+
+ ///
+ /// The current group reward received by the agent.
+ ///
+ public float groupReward;
+
+ ///
+ /// Whether the agent is done or not.
+ ///
+ public bool done;
+
+ ///
+ /// Whether the agent has reached its max step count for this episode.
+ ///
+ public bool maxStepReached;
+
+ ///
+ /// Episode identifier each agent receives at every reset. It is used
+ /// to distinguish between different agents in the environment.
+ ///
+ public int episodeId;
+
+ ///
+ /// MultiAgentGroup identifier.
+ ///
+ public int groupId;
+
+ public void ClearActions()
+ {
+ storedActions.Clear();
+ }
+
+ public void CopyActions(ActionBuffers actionBuffers)
+ {
+ var continuousActions = storedActions.ContinuousActions;
+ for (var i = 0; i < actionBuffers.ContinuousActions.Length; i++)
+ {
+ continuousActions[i] = actionBuffers.ContinuousActions[i];
+ }
+ var discreteActions = storedActions.DiscreteActions;
+ for (var i = 0; i < actionBuffers.DiscreteActions.Length; i++)
+ {
+ discreteActions[i] = actionBuffers.DiscreteActions[i];
+ }
+ }
+ }
+
+ ///
+ /// Simple wrapper around VectorActuator that overrides GetBuiltInActuatorType
+ /// so that it can be distinguished from a standard VectorActuator.
+ ///
+ internal class AgentVectorActuator : VectorActuator
+ {
+ public AgentVectorActuator(IActionReceiver actionReceiver,
+ IHeuristicProvider heuristicProvider,
+ ActionSpec actionSpec,
+ string name = "VectorActuator"
+ ) : base(actionReceiver, heuristicProvider, actionSpec, name)
+ { }
+
+ public override BuiltInActuatorType GetBuiltInActuatorType()
+ {
+ return BuiltInActuatorType.AgentVectorActuator;
+ }
+ }
+
+ ///
+ /// An agent is an actor that can observe its environment, decide on the
+ /// best course of action using those observations, and execute those actions
+ /// within the environment.
+ ///
+ ///
+ /// Use the Agent class as the subclass for implementing your own agents. Add
+ /// your Agent implementation to a [GameObject] in the [Unity scene] that serves
+ /// as the agent's environment.
+ ///
+ /// Agents in an environment operate in *steps*. At each step, an agent collects observations,
+ /// passes them to its decision-making policy, and receives an action vector in response.
+ ///
+ /// Agents make observations using ISensor implementations. The ML-Agents
+ /// API provides implementations for visual observations (CameraSensor),
+ /// raycast observations (RayPerceptionSensor), and arbitrary
+ /// data observations (VectorSensor). You can add the
+ /// CameraSensorComponent and RayPerceptionSensorComponent2D or
+ /// RayPerceptionSensorComponent3D components to an agent's [GameObject] to use
+ /// those sensor types. You can implement the CollectObservations(VectorSensor)
+ /// function in your Agent subclass to use a vector observation. The Agent class calls this
+ /// function before it uses the observation vector to make a decision. (If you only use
+ /// visual or raycast observations, you do not need to implement
+ /// CollectObservations(VectorSensor).)
+ ///
+ /// Assign a decision making policy to an agent using a BehaviorParameters
+ /// component attached to the agent's [GameObject]. The BehaviorType setting
+ /// determines how decisions are made:
+ ///
+ /// * BehaviorType.Default: decisions are made by the external process,
+ /// when connected. Otherwise, decisions are made using inference. If no inference model
+ /// is specified in the BehaviorParameters component, then heuristic decision
+ /// making is used.
+ /// * BehaviorType.InferenceOnly: decisions are always made using the trained
+ /// model specified in the BehaviorParameters component.
+ /// * BehaviorType.HeuristicOnly: when a decision is needed, the agent's
+ /// Heuristic() function is called. Your implementation is responsible for
+ /// providing the appropriate action.
+ ///
+ /// To trigger an agent decision automatically, you can attach a DecisionRequester
+ /// component to the Agent game object. You can also call the agent's
+ /// RequestDecision() function manually. You only need to call RequestDecision() when the agent is
+ /// in a position to act upon the decision. In many cases, this will be every [FixedUpdate]
+ /// callback, but could be less frequent. For example, an agent that hops around its environment
+ /// can only take an action when it touches the ground, so several frames might elapse between
+ /// one decision and the need for the next.
+ ///
+ /// Use the OnActionReceived() function to implement the actions your agent can take,
+ /// such as moving to reach a goal or interacting with its environment.
+ ///
+ /// When you call EndEpisode() on an agent or the agent reaches its MaxStep count,
+ /// its current episode ends. You can reset the agent -- or remove it from the
+ /// environment -- by implementing the OnEpisodeBegin() function. An agent also
+ /// becomes done when the Academy resets the environment, which only happens when
+ /// the Academy receives a reset signal from an external process via the
+ /// Communicator.
+ ///
+ /// The Agent class extends the Unity [MonoBehaviour] class. You can implement the
+ /// standard [MonoBehaviour] functions as needed for your agent. Since an agent's
+ /// observations and actions typically take place during the [FixedUpdate] phase, you should
+ /// only use the [MonoBehaviour.Update] function for cosmetic purposes. If you override the [MonoBehaviour]
+ /// methods, [OnEnable()] or [OnDisable()], always call the base Agent class implementations.
+ ///
+ /// You can implement the Heuristic() function to specify agent actions using
+ /// your own heuristic algorithm. Implementing a heuristic function can be useful
+ /// for debugging. For example, you can use keyboard input to select agent actions in
+ /// order to manually control an agent's behavior.
+ ///
+ /// Note that you can change the inference model assigned to an agent at any step
+ /// by calling SetModel().
+ ///
+ /// See [Agents] and [Reinforcement Learning in Unity] in the [Unity ML-Agents Toolkit manual] for
+ /// more information on creating and training agents.
+ ///
+ /// For sample implementations of agent behavior, see the examples available in the
+ /// [Unity ML-Agents Toolkit] on Github.
+ ///
+ /// [MonoBehaviour]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.html
+ /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
+ /// [Unity scene]: https://docs.unity3d.com/Manual/CreatingScenes.html
+ /// [FixedUpdate]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.FixedUpdate.html
+ /// [MonoBehaviour.Update]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.Update.html
+ /// [OnEnable()]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.OnEnable.html
+ /// [OnDisable()]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.OnDisable.html]
+ /// [OnBeforeSerialize()]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.OnBeforeSerialize.html
+ /// [OnAfterSerialize()]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.OnAfterSerialize.html
+ /// [Agents]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Learning-Environment-Design-Agents.md
+ /// [Reinforcement Learning in Unity]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Learning-Environment-Design.md
+ /// [Unity ML-Agents Toolkit]: https://github.com/Unity-Technologies/ml-agents
+ /// [Unity ML-Agents Toolkit manual]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Readme.md
+ ///
+ ///
+ [HelpURL("https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/" +
+ "docs/Learning-Environment-Design-Agents.md")]
+ [Serializable]
+ [RequireComponent(typeof(BehaviorParameters))]
+ [DefaultExecutionOrder(-50)]
+ public partial class Agent : MonoBehaviour, ISerializationCallbackReceiver, IActionReceiver, IHeuristicProvider
+ {
+ IPolicy m_Brain;
+ BehaviorParameters m_PolicyFactory;
+
+ /// This code is here to make the upgrade path for users using MaxStep
+ /// easier. We will hook into the Serialization code and make sure that
+ /// agentParameters.maxStep and this.maxStep are in sync.
+ [Serializable]
+ internal struct AgentParameters
+ {
+ public int maxStep;
+ }
+
+ [SerializeField]
+ [HideInInspector]
+ internal AgentParameters agentParameters;
+ [SerializeField]
+ [HideInInspector]
+ internal bool hasUpgradedFromAgentParameters;
+
+ ///
+ /// The maximum number of steps the agent takes before being done.
+ ///
+ /// The maximum steps for an agent to take before it resets; or 0 for
+ /// unlimited steps.
+ ///
+ /// The max step value determines the maximum length of an agent's episodes.
+ /// Set to a positive integer to limit the episode length to that many steps.
+ /// Set to 0 for unlimited episode length.
+ ///
+ /// When an episode ends and a new one begins, the Agent object's
+ /// OnEpisodeBegin() function is called. You can implement
+ /// OnEpisodeBegin() to reset the agent or remove it from the
+ /// environment. An agent's episode can also end if you call its
+ /// EndEpisode() method or an external process resets the environment through the Academy.
+ ///
+ /// Consider limiting the number of steps in an episode to avoid wasting time during
+ /// training. If you set the max step value to a reasonable estimate of the time it should
+ /// take to complete a task, then agents that haven’t succeeded in that time frame will
+ /// reset and start a new training episode rather than continue to fail.
+ ///
+ ///
+ /// To use a step limit when training while allowing agents to run without resetting
+ /// outside of training, you can set the max step to 0 in Initialize()
+ /// if the Academy is not connected to an external process.
+ ///
+ /// using Unity.MLAgents;
+ ///
+ /// public class MyAgent : Agent
+ /// {
+ /// public override void Initialize()
+ /// {
+ /// if (!Academy.Instance.IsCommunicatorOn)
+ /// {
+ /// this.MaxStep = 0;
+ /// }
+ /// }
+ /// }
+ ///
+ /// **Note:** in general, you should limit the differences between the code you execute
+ /// during training and the code you run during inference.
+ ///
+ [FormerlySerializedAs("maxStep")]
+ [HideInInspector] public int MaxStep;
+
+ /// Current Agent information (message sent to Brain).
+ AgentInfo m_Info;
+
+ /// Represents the reward the agent accumulated during the current step.
+ /// It is reset to 0 at the beginning of every step.
+ /// Should be set to a positive value when the agent performs a "good"
+ /// action that we wish to reinforce/reward, and set to a negative value
+ /// when the agent performs a "bad" action that we wish to punish/deter.
+ /// Additionally, the magnitude of the reward should not exceed 1.0
+ float m_Reward;
+
+ /// Represents the group reward the agent accumulated during the current step.
+ float m_GroupReward;
+
+ /// Keeps track of the cumulative reward in this episode.
+ float m_CumulativeReward;
+
+ /// Whether or not the agent requests an action.
+ bool m_RequestAction;
+
+ /// Whether or not the agent requests a decision.
+ bool m_RequestDecision;
+
+ /// Keeps track of the number of steps taken by the agent in this episode.
+ /// Note that this value is different for each agent, and may not overlap
+ /// with the step counter in the Academy, since agents reset based on
+ /// their own experience.
+ int m_StepCount;
+
+ /// Number of times the Agent has completed an episode.
+ int m_CompletedEpisodes;
+
+ /// Episode identifier each agent receives. It is used
+ /// to distinguish between different agents in the environment.
+ /// This Id will be changed every time the Agent resets.
+ int m_EpisodeId;
+
+ /// Whether or not the Agent has been initialized already
+ bool m_Initialized;
+
+ ///
+ /// Set of DemonstrationWriters that the Agent will write its step information to.
+ /// If you use a DemonstrationRecorder component, this will automatically register its DemonstrationWriter.
+ /// You can also add your own DemonstrationWriter by calling
+ /// DemonstrationRecorder.AddDemonstrationWriterToAgent()
+ ///
+ internal ISet<DemonstrationWriter> DemonstrationWriters = new HashSet<DemonstrationWriter>();
+
+ ///
+ /// List of sensors used to generate observations.
+ /// Currently generated from attached SensorComponents, and a legacy VectorSensor
+ ///
+ internal List<ISensor> sensors;
+
+ ///
+ /// VectorSensor which is written to by AddVectorObs
+ ///
+ internal VectorSensor collectObservationsSensor;
+
+ ///
+ /// StackingSensor which is written to by AddVectorObs
+ ///
+ internal StackingSensor stackedCollectObservationsSensor;
+
+ private RecursionChecker m_CollectObservationsChecker = new RecursionChecker("CollectObservations");
+ private RecursionChecker m_OnEpisodeBeginChecker = new RecursionChecker("OnEpisodeBegin");
+
+ ///
+ /// List of IActuators that this Agent will delegate actions to if any exist.
+ ///
+ ActuatorManager m_ActuatorManager;
+
+ ///
+ /// VectorActuator which is used by default if no other actuators exist on this Agent. This VectorActuator will
+ /// delegate its actions to OnActionReceived() by default in order to keep backward compatibility
+ /// with the current behavior of Agent.
+ ///
+ IActuator m_VectorActuator;
+
+ /// Current MultiAgentGroup ID. Defaults to 0 (meaning no group).
+ int m_GroupId;
+
+ /// Delegate for the agent to unregister itself from the MultiAgentGroup without cyclic reference
+ /// between agent and the group
+ internal event Action OnAgentDisabled;
+
+ ///
+ /// Called when the Agent is being loaded (before OnEnable()).
+ ///
+ ///
+ /// This function registers the RpcCommunicator delegate if no delegate has been registered with CommunicatorFactory.
+ /// Always call the base Agent class version of this function if you implement `Awake()` in your
+ /// own Agent subclasses.
+ ///
+ ///
+ ///
+ /// protected override void Awake()
+ /// {
+ /// base.Awake();
+ /// // additional Awake logic...
+ /// }
+ ///
+ ///
+ protected internal virtual void Awake()
+ {
+#if UNITY_EDITOR || UNITY_STANDALONE
+ if (!CommunicatorFactory.CommunicatorRegistered)
+ {
+ Debug.Log("Registered Communicator in Agent.");
+ CommunicatorFactory.Register(RpcCommunicator.Create);
+ }
+#endif
+ }
+
+ ///
+ /// Called when the attached [GameObject] becomes enabled and active.
+ /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
+ ///
+ ///
+ /// This function initializes the Agent instance, if it hasn't been initialized yet.
+ /// Always call the base Agent class version of this function if you implement `OnEnable()`
+ /// in your own Agent subclasses.
+ ///
+ ///
+ ///
+ /// protected override void OnEnable()
+ /// {
+ /// base.OnEnable();
+ /// // additional OnEnable logic...
+ /// }
+ ///
+ ///
+ protected virtual void OnEnable()
+ {
+ LazyInitialize();
+ }
+
+ ///
+ /// Called by Unity immediately before serializing this object.
+ ///
+ ///
+ /// The Agent class uses OnBeforeSerialize() for internal housekeeping. Call the
+ /// base class implementation if you need your own custom serialization logic.
+ ///
+ /// See [OnBeforeSerialize] for more information.
+ ///
+ /// [OnBeforeSerialize]: https://docs.unity3d.com/ScriptReference/ISerializationCallbackReceiver.OnBeforeSerialize.html
+ ///
+ ///
+ ///
+ /// public new void OnBeforeSerialize()
+ /// {
+ /// base.OnBeforeSerialize();
+ /// // additional serialization logic...
+ /// }
+ ///
+ ///
+ public void OnBeforeSerialize()
+ {
+ // Manages a serialization upgrade issue from v0.13 to v0.14 where MaxStep moved
+ // from AgentParameters (since removed) to Agent
+ if (MaxStep == 0 && MaxStep != agentParameters.maxStep && !hasUpgradedFromAgentParameters)
+ {
+ MaxStep = agentParameters.maxStep;
+ }
+ hasUpgradedFromAgentParameters = true;
+ }
+
+ ///
+ /// Called by Unity immediately after deserializing this object.
+ ///
+ ///
+ /// The Agent class uses OnAfterDeserialize() for internal housekeeping. Call the
+ /// base class implementation if you need your own custom deserialization logic.
+ ///
+ /// See [OnAfterDeserialize] for more information.
+ ///
+ /// [OnAfterDeserialize]: https://docs.unity3d.com/ScriptReference/ISerializationCallbackReceiver.OnAfterDeserialize.html
+ ///
+ ///
+ ///
+ /// public new void OnAfterDeserialize()
+ /// {
+ /// base.OnAfterDeserialize();
+ /// // additional deserialization logic...
+ /// }
+ ///
+ ///
+ public void OnAfterDeserialize()
+ {
+ // Manages a serialization upgrade issue from v0.13 to v0.14 where MaxStep moved
+ // from AgentParameters (since removed) to Agent
+ if (MaxStep == 0 && MaxStep != agentParameters.maxStep && !hasUpgradedFromAgentParameters)
+ {
+ MaxStep = agentParameters.maxStep;
+ }
+ hasUpgradedFromAgentParameters = true;
+ }
+
+ ///
+ /// Initializes the agent. Can be safely called multiple times.
+ ///
+ ///
+ /// This function calls your Initialize() implementation, if one exists.
+ ///
+ public void LazyInitialize()
+ {
+ if (m_Initialized)
+ {
+ return;
+ }
+ m_Initialized = true;
+
+ // Grab the "static" properties for the Agent.
+ m_EpisodeId = EpisodeIdCounter.GetEpisodeId();
+ m_PolicyFactory = GetComponent<BehaviorParameters>();
+
+ m_Info = new AgentInfo();
+ sensors = new List<ISensor>();
+
+ Academy.Instance.AgentIncrementStep += AgentIncrementStep;
+ Academy.Instance.AgentSendState += SendInfo;
+ Academy.Instance.DecideAction += DecideAction;
+ Academy.Instance.AgentAct += AgentStep;
+ Academy.Instance.AgentForceReset += _AgentReset;
+
+ using (TimerStack.Instance.Scoped("InitializeActuators"))
+ {
+ InitializeActuators();
+ }
+
+ m_Brain = m_PolicyFactory.GeneratePolicy(m_ActuatorManager.GetCombinedActionSpec(), m_ActuatorManager);
+ ResetData();
+ Initialize();
+
+ using (TimerStack.Instance.Scoped("InitializeSensors"))
+ {
+ InitializeSensors();
+ }
+
+ m_Info.storedActions = new ActionBuffers(
+ new float[m_ActuatorManager.NumContinuousActions],
+ new int[m_ActuatorManager.NumDiscreteActions]
+ );
+
+ m_Info.groupId = m_GroupId;
+
+ // The first time the Academy resets, all Agents in the scene will be
+ // forced to reset through the AgentForceReset event.
+ // To avoid the Agent resetting twice, the Agents will not begin their
+ // episode when initializing until after the Academy has had its first reset.
+ if (Academy.Instance.TotalStepCount != 0)
+ {
+ using (m_OnEpisodeBeginChecker.Start())
+ {
+ OnEpisodeBegin();
+ }
+ }
+ }
+
+ ///
+ /// The reason that the Agent has been set to "done".
+ ///
+ enum DoneReason
+ {
+ ///
+ /// The episode was ended manually by calling EndEpisode().
+ ///
+ DoneCalled,
+
+ ///
+ /// The max steps for the Agent were reached.
+ ///
+ MaxStepReached,
+
+ ///
+ /// The Agent was disabled.
+ ///
+ Disabled,
+ }
+
+ ///
+ /// Called when the attached [GameObject] becomes disabled and inactive.
+ /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
+ ///
+ ///
+ /// Always call the base Agent class version of this function if you implement `OnDisable()`
+ /// in your own Agent subclasses.
+ ///
+ ///
+ ///
+ /// protected override void OnDisable()
+ /// {
+ /// base.OnDisable();
+ /// // additional OnDisable logic...
+ /// }
+ ///
+ ///
+ ///
+ protected virtual void OnDisable()
+ {
+ DemonstrationWriters.Clear();
+
+ // If Academy.Dispose has already been called, we don't need to unregister with it.
+ // We don't want to even try, because this will lazily create a new Academy!
+ if (Academy.IsInitialized)
+ {
+ Academy.Instance.AgentIncrementStep -= AgentIncrementStep;
+ Academy.Instance.AgentSendState -= SendInfo;
+ Academy.Instance.DecideAction -= DecideAction;
+ Academy.Instance.AgentAct -= AgentStep;
+ Academy.Instance.AgentForceReset -= _AgentReset;
+ NotifyAgentDone(DoneReason.Disabled);
+ }
+
+ CleanupSensors();
+ m_Brain?.Dispose();
+ OnAgentDisabled?.Invoke(this);
+ m_Initialized = false;
+ }
+
+ void NotifyAgentDone(DoneReason doneReason)
+ {
+ if (m_Info.done)
+ {
+ // The Agent was already marked as Done and should not be notified again
+ return;
+ }
+ m_Info.episodeId = m_EpisodeId;
+ m_Info.reward = m_Reward;
+ m_Info.groupReward = m_GroupReward;
+ m_Info.done = true;
+ m_Info.maxStepReached = doneReason == DoneReason.MaxStepReached;
+ m_Info.groupId = m_GroupId;
+ UpdateSensors();
+ // Make sure the latest observations are being passed to training.
+ using (m_CollectObservationsChecker.Start())
+ {
+ CollectObservations(collectObservationsSensor);
+ }
+ // Request the last decision with no callbacks
+ // We request a decision so Python knows the Agent is done immediately
+ m_Brain?.RequestDecision(m_Info, sensors);
+
+ // We also have to write to any DemonstrationWriters so that they get the "done" flag.
+ if (DemonstrationWriters.Count != 0)
+ {
+ foreach (var demoWriter in DemonstrationWriters)
+ {
+ demoWriter.Record(m_Info, sensors);
+ }
+ }
+
+ ResetSensors();
+
+ if (doneReason != DoneReason.Disabled)
+ {
+ // We don't want to update the reward stats when the Agent is disabled, because this will make
+ // the rewards look lower than they actually are during shutdown.
+ m_CompletedEpisodes++;
+ UpdateRewardStats();
+ }
+
+ m_Reward = 0f;
+ m_GroupReward = 0f;
+ m_CumulativeReward = 0f;
+ m_RequestAction = false;
+ m_RequestDecision = false;
+ m_Info.storedActions.Clear();
+ }
+
+ ///
+ /// Updates the Model assigned to this Agent instance.
+ ///
+ ///
+ /// If the agent already has an assigned model, that model is replaced with
+ /// the provided one. However, if you call this function with arguments that are
+ /// identical to the current parameters of the agent, then no changes are made.
+ ///
+ /// **Note:** the behaviorName parameter is ignored when not training.
+ /// The model and inferenceDevice parameters
+ /// are ignored when not using inference.
+ ///
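+ ///
+ /// A short sketch, assuming hardModel is an NNModel asset assigned in the
+ /// Inspector and "Walker" is the behavior name (both illustrative):
+ ///
+ /// public NNModel hardModel;
+ ///
+ /// void AdvanceCurriculum()
+ /// {
+ ///     SetModel("Walker", hardModel);
+ /// }
+ ///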
+ /// The identifier of the behavior. This
+ /// will categorize the agent when training.
+ ///
+ /// The model to use for inference.
+ /// Define the device on which the model
+ /// will be run.
+ public void SetModel(
+ string behaviorName,
+ NNModel model,
+ InferenceDevice inferenceDevice = InferenceDevice.Default)
+ {
+ if (behaviorName == m_PolicyFactory.BehaviorName &&
+ model == m_PolicyFactory.Model &&
+ inferenceDevice == m_PolicyFactory.InferenceDevice)
+ {
+ // If everything is the same, don't make any changes.
+ return;
+ }
+ NotifyAgentDone(DoneReason.Disabled);
+ m_PolicyFactory.Model = model;
+ m_PolicyFactory.InferenceDevice = inferenceDevice;
+ m_PolicyFactory.BehaviorName = behaviorName;
+ ReloadPolicy();
+ }
+
+ internal void ReloadPolicy()
+ {
+ if (!m_Initialized)
+ {
+ // If we haven't initialized yet, no need to make any changes now; they'll
+ // happen in LazyInitialize later.
+ return;
+ }
+ m_Brain?.Dispose();
+ m_Brain = m_PolicyFactory.GeneratePolicy(m_ActuatorManager.GetCombinedActionSpec(), m_ActuatorManager);
+ }
+
+ ///
+ /// Returns the current step counter (within the current episode).
+ ///
+ ///
+ /// Current step count.
+ ///
+ public int StepCount
+ {
+ get { return m_StepCount; }
+ }
+
+ ///
+ /// Returns the number of episodes that the Agent has completed (either
+ /// EndEpisode() was called, or MaxStep was reached).
+ ///
+ ///
+ /// Current episode count.
+ ///
+ public int CompletedEpisodes
+ {
+ get { return m_CompletedEpisodes; }
+ }
+
+ ///
+ /// Overrides the current step reward of the agent and updates the episode
+ /// reward accordingly.
+ ///
+ ///
+ /// This function replaces any rewards given to the agent during the current step.
+ /// Use AddReward() to incrementally change the reward rather than
+ /// overriding it.
+ ///
+ /// Typically, you assign rewards in the Agent subclass's OnActionReceived()
+ /// implementation after carrying out the received action and evaluating its success.
+ ///
+ /// Rewards are used during reinforcement learning; they are ignored during inference.
+ ///
+ /// See [Agents - Rewards] for general advice on implementing rewards and [Reward Signals]
+ /// for information about mixing reward signals from curiosity and Generative Adversarial
+ /// Imitation Learning (GAIL) with rewards supplied through this method.
+ ///
+ /// [Agents - Rewards]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Learning-Environment-Design-Agents.md#rewards
+ /// [Reward Signals]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/ML-Agents-Overview.md#a-quick-note-on-reward-signals
+ ///
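+ ///
+ /// A minimal sketch, assuming a goal-reaching task (goalReached is a
+ /// hypothetical flag set elsewhere in the agent):
+ ///
+ /// if (goalReached)
+ /// {
+ ///     SetReward(1.0f);
+ ///     EndEpisode();
+ /// }
+ ///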
+ /// The new value of the reward.
+ public void SetReward(float reward)
+ {
+ Utilities.DebugCheckNanAndInfinity(reward, nameof(reward), nameof(SetReward));
+ m_CumulativeReward += (reward - m_Reward);
+ m_Reward = reward;
+ }
+
+ ///
+ /// Increments the step and episode rewards by the provided value.
+ ///
+ /// Use a positive reward to reinforce desired behavior. You can use a
+ /// negative reward to penalize mistakes. Use SetReward() to
+ /// set the reward assigned to the current step with a specific value rather than
+ /// increasing or decreasing it.
+ ///
+ /// Typically, you assign rewards in the Agent subclass's OnActionReceived()
+ /// implementation after carrying out the received action and evaluating its success.
+ ///
+ /// Rewards are used during reinforcement learning; they are ignored during inference.
+ ///
+ /// See [Agents - Rewards] for general advice on implementing rewards and [Reward Signals]
+ /// for information about mixing reward signals from curiosity and Generative Adversarial
+ /// Imitation Learning (GAIL) with rewards supplied through this method.
+ ///
+ /// [Agents - Rewards]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Learning-Environment-Design-Agents.md#rewards
+ /// [Reward Signals]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/ML-Agents-Overview.md#a-quick-note-on-reward-signals
+ ///
+ /// Incremental reward value.
+ public void AddReward(float increment)
+ {
+ Utilities.DebugCheckNanAndInfinity(increment, nameof(increment), nameof(AddReward));
+ m_Reward += increment;
+ m_CumulativeReward += increment;
+ }
+
+ internal void SetGroupReward(float reward)
+ {
+ Utilities.DebugCheckNanAndInfinity(reward, nameof(reward), nameof(SetGroupReward));
+ m_GroupReward = reward;
+ }
+
+ internal void AddGroupReward(float increment)
+ {
+ Utilities.DebugCheckNanAndInfinity(increment, nameof(increment), nameof(AddGroupReward));
+ m_GroupReward += increment;
+ }
+
+ ///
+ /// Retrieves the episode reward for the Agent.
+ ///
+ /// The episode reward.
+ public float GetCumulativeReward()
+ {
+ return m_CumulativeReward;
+ }
+
+ void UpdateRewardStats()
+ {
+ var gaugeName = $"{m_PolicyFactory.BehaviorName}.CumulativeReward";
+ TimerStack.Instance.SetGauge(gaugeName, GetCumulativeReward());
+ }
+
+ ///
+ /// Sets the done flag to true and resets the agent.
+ ///
+ ///
+ /// This should be used when the episode can no longer continue, such as when the Agent
+ /// reaches the goal or fails at the task.
+ ///
+ ///
+ ///
+ public void EndEpisode()
+ {
+ EndEpisodeAndReset(DoneReason.DoneCalled);
+ }
+
+ ///
+ /// Indicate that the episode is over but not due to the "fault" of the Agent.
+ /// This has the same end result as calling EndEpisode(), but has a
+ /// slightly different effect on training.
+ ///
+ ///
+ /// This should be used when the episode could continue, but has gone on for
+ /// a sufficient number of steps.
+ ///
+ ///
+ ///
+ public void EpisodeInterrupted()
+ {
+ EndEpisodeAndReset(DoneReason.MaxStepReached);
+ }
+
+ ///
+ /// Internal method to end the episode and reset the Agent.
+ ///
+ ///
+ void EndEpisodeAndReset(DoneReason reason)
+ {
+ NotifyAgentDone(reason);
+ _AgentReset();
+ }
+
+ ///
+ /// Requests a new decision for this agent.
+ ///
+ ///
+ /// Call `RequestDecision()` whenever an agent needs a decision. You often
+ /// want to request a decision every environment step. However, if an agent
+ /// cannot use the decision every step, then you can request a decision less
+ /// frequently.
+ ///
+ /// You can add a DecisionRequester component to the agent's
+ /// [GameObject] to drive the agent's decision making. When you use this component,
+ /// do not call `RequestDecision()` separately.
+ ///
+ /// Note that this function calls RequestAction(); you do not need to
+ /// call both functions at the same time.
+ ///
+ /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
+ ///
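+ ///
+ /// A minimal sketch of a manual decision request; requesting every
+ /// [FixedUpdate] is one common pattern, not a requirement:
+ ///
+ /// void FixedUpdate()
+ /// {
+ ///     RequestDecision();
+ /// }
+ ///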
+ public void RequestDecision()
+ {
+ m_RequestDecision = true;
+ RequestAction();
+ }
+
+ ///
+ /// Requests an action for this agent.
+ ///
+ ///
+ /// Call `RequestAction()` to repeat the previous action returned by the agent's
+ /// most recent decision. A new decision is not requested. When you call this function,
+ /// the Agent instance invokes OnActionReceived() with the
+ /// existing action vector.
+ ///
+ /// You can use `RequestAction()` in situations where an agent must take an action
+ /// every update, but doesn't need to make a decision as often. For example, an
+ /// agent that moves through its environment might need to apply an action to keep
+ /// moving, but only needs to make a decision to change course or speed occasionally.
+ ///
+ /// You can add a DecisionRequester component to the agent's
+ /// [GameObject] to drive the agent's decision making and action frequency. When you
+ /// use this component, do not call `RequestAction()` separately.
+ ///
+ /// Note that RequestDecision() calls `RequestAction()`; you do not need to
+ /// call both functions at the same time.
+ ///
+ /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
+ ///
+ public void RequestAction()
+ {
+ m_RequestAction = true;
+ }
+
+ /// Helper function that resets all the data structures associated with
+ /// the agent. Typically used when the agent is being initialized or reset
+ /// at the end of an episode.
+ void ResetData()
+ {
+ m_ActuatorManager?.ResetData();
+ }
+
+ ///
+ /// Implement `Initialize()` to perform one-time initialization or set up of the
+ /// Agent instance.
+ ///
+ ///
+ /// `Initialize()` is called once when the agent is first enabled. If, for example,
+ /// the Agent object needs references to other [GameObjects] in the scene, you
+ /// can collect and store those references here.
+ ///
+ /// Note that OnEpisodeBegin() is called at the start of each of
+ /// the agent's "episodes". You can use that function for items that need to be reset
+ /// for each episode.
+ ///
+ /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
+ ///
+ public virtual void Initialize() { }
+
+ ///
+ /// Implement to choose an action for this agent using a custom heuristic.
+ ///
+ ///
+ /// Implement this function to provide custom decision making logic or to support manual
+ /// control of an agent using keyboard, mouse, game controller input, or a script.
+ ///
+ /// Your heuristic implementation can use any decision making logic you specify. Assign decision
+ /// values to the ActionBuffers.ContinuousActions and ActionBuffers.DiscreteActions
+ /// arrays, passed to your function as a parameter.
+ /// The same array will be reused between steps. It is up to the user to initialize
+ /// the values on each call, for example by calling `Array.Clear(actionsOut, 0, actionsOut.Length);`.
+ /// Add values to the array at the same indexes as they are used in your
+ /// OnActionReceived() function, which receives this array and
+ /// implements the corresponding agent behavior. See [Actions] for more information
+ /// about agent actions.
+ /// Note: Do not create a new float array of actions in the `Heuristic()` method,
+ /// as this will prevent writing floats to the original action array.
+ ///
+ /// An agent calls this `Heuristic()` function to make a decision when you set its behavior
+ /// type to BehaviorType.HeuristicOnly. The agent also calls this function if
+ /// you set its behavior type to BehaviorType.Default when the
+ /// Academy is not connected to an external training process and you do not
+ /// assign a trained model to the agent.
+ ///
+ /// To perform imitation learning, implement manual control of the agent in the `Heuristic()`
+ /// function so that you can record the demonstrations required for the imitation learning
+ /// algorithms. (Attach a [Demonstration Recorder] component to the agent's [GameObject] to
+ /// record the demonstration session to a file.)
+ ///
+ /// Even when you don’t plan to use heuristic decisions for an agent or imitation learning,
+ /// implementing a simple heuristic function can aid in debugging agent actions and interactions
+ /// with its environment.
+ ///
+ /// [Demonstration Recorder]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Learning-Environment-Design-Agents.md#recording-demonstrations
+ /// [Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Learning-Environment-Design-Agents.md#actions
+ /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
+ ///
+ ///
+ /// The following example illustrates a `Heuristic()` function that provides WASD-style
+ /// keyboard control for an agent that can move in two dimensions as well as jump. See
+ /// [Input Manager] for more information about the built-in Unity input functions.
+ /// You can also use the [Input System package], which provides a more flexible and
+ /// configurable input system.
+ ///
+ /// public override void Heuristic(in ActionBuffers actionsOut)
+ /// {
+ /// var continuousActionsOut = actionsOut.ContinuousActions;
+ /// continuousActionsOut[0] = Input.GetAxis("Horizontal");
+ /// continuousActionsOut[1] = Input.GetKey(KeyCode.Space) ? 1.0f : 0.0f;
+ /// continuousActionsOut[2] = Input.GetAxis("Vertical");
+ /// }
+ ///
+ /// [Input Manager]: https://docs.unity3d.com/Manual/class-InputManager.html
+ /// [Input System package]: https://docs.unity3d.com/Packages/com.unity.inputsystem@1.0/manual/index.html
+ ///
+ /// The ActionBuffers which contain the continuous and
+ /// discrete action buffers to write to.
+ ///
+ public virtual void Heuristic(in ActionBuffers actionsOut)
+ {
+ Debug.LogWarning("Heuristic method called but not implemented. Returning placeholder actions.");
+ }
+
+ ///
+ /// Set up the list of ISensors on the Agent. By default, this will select any
+ /// SensorComponents attached to the Agent.
+ ///
+ internal void InitializeSensors()
+ {
+ if (m_PolicyFactory == null)
+ {
+ m_PolicyFactory = GetComponent<BehaviorParameters>();
+ }
+ if (m_PolicyFactory.ObservableAttributeHandling != ObservableAttributeOptions.Ignore)
+ {
+ var excludeInherited =
+ m_PolicyFactory.ObservableAttributeHandling == ObservableAttributeOptions.ExcludeInherited;
+ using (TimerStack.Instance.Scoped("CreateObservableSensors"))
+ {
+ var observableSensors = ObservableAttribute.CreateObservableSensors(this, excludeInherited);
+ sensors.AddRange(observableSensors);
+ }
+ }
+
+ // Get all attached sensor components
+ SensorComponent[] attachedSensorComponents;
+ if (m_PolicyFactory.UseChildSensors)
+ {
+ attachedSensorComponents = GetComponentsInChildren<SensorComponent>();
+ }
+ else
+ {
+ attachedSensorComponents = GetComponents<SensorComponent>();
+ }
+
+ sensors.Capacity += attachedSensorComponents.Length;
+ foreach (var component in attachedSensorComponents)
+ {
+ sensors.AddRange(component.CreateSensors());
+ }
+
+ // Support legacy CollectObservations
+ var param = m_PolicyFactory.BrainParameters;
+ if (param.VectorObservationSize > 0)
+ {
+ collectObservationsSensor = new VectorSensor(param.VectorObservationSize);
+ if (param.NumStackedVectorObservations > 1)
+ {
+ stackedCollectObservationsSensor = new StackingSensor(
+ collectObservationsSensor, param.NumStackedVectorObservations);
+ sensors.Add(stackedCollectObservationsSensor);
+ }
+ else
+ {
+ sensors.Add(collectObservationsSensor);
+ }
+ }
+
+ // Sort the Sensors by name to ensure determinism
+ SensorUtils.SortSensors(sensors);
+
+#if DEBUG
+ // Make sure the names are actually unique
+
+ for (var i = 0; i < sensors.Count - 1; i++)
+ {
+ Debug.Assert(
+ !sensors[i].GetName().Equals(sensors[i + 1].GetName()),
+ "Sensor names must be unique.");
+ }
+#endif
+ }
+
+ void CleanupSensors()
+ {
+ // Dispose all attached sensors.
+ for (var i = 0; i < sensors.Count; i++)
+ {
+ var sensor = sensors[i];
+ if (sensor is IDisposable disposableSensor)
+ {
+ disposableSensor.Dispose();
+ }
+ }
+ }
+
+ void InitializeActuators()
+ {
+ ActuatorComponent[] attachedActuators;
+ if (m_PolicyFactory.UseChildActuators)
+ {
+ attachedActuators = GetComponentsInChildren<ActuatorComponent>();
+ }
+ else
+ {
+ attachedActuators = GetComponents<ActuatorComponent>();
+ }
+
+ // Support legacy OnActionReceived
+ // TODO don't set this up if the sizes are 0?
+ var param = m_PolicyFactory.BrainParameters;
+ m_VectorActuator = new AgentVectorActuator(this, this, param.ActionSpec);
+ m_ActuatorManager = new ActuatorManager(attachedActuators.Length + 1);
+
+ m_ActuatorManager.Add(m_VectorActuator);
+
+ foreach (var actuatorComponent in attachedActuators)
+ {
+ m_ActuatorManager.AddActuators(actuatorComponent.CreateActuators());
+ }
+ }
+
+ ///
+ /// Sends the Agent info to the linked Brain.
+ ///
+ void SendInfoToBrain()
+ {
+ if (!m_Initialized)
+ {
+ throw new UnityAgentsException("Call to SendInfoToBrain when Agent hasn't been initialized. " +
+ "Please ensure that you are calling 'base.OnEnable()' if you have overridden OnEnable.");
+ }
+
+ if (m_Brain == null)
+ {
+ return;
+ }
+
+ if (m_Info.done)
+ {
+ m_Info.ClearActions();
+ }
+ else
+ {
+ m_Info.CopyActions(m_ActuatorManager.StoredActions);
+ }
+
+ UpdateSensors();
+ using (TimerStack.Instance.Scoped("CollectObservations"))
+ {
+ using (m_CollectObservationsChecker.Start())
+ {
+ CollectObservations(collectObservationsSensor);
+ }
+ }
+ using (TimerStack.Instance.Scoped("WriteActionMask"))
+ {
+ m_ActuatorManager.WriteActionMask();
+ }
+
+ m_Info.discreteActionMasks = m_ActuatorManager.DiscreteActionMask?.GetMask();
+ m_Info.reward = m_Reward;
+ m_Info.groupReward = m_GroupReward;
+ m_Info.done = false;
+ m_Info.maxStepReached = false;
+ m_Info.episodeId = m_EpisodeId;
+ m_Info.groupId = m_GroupId;
+
+ using (TimerStack.Instance.Scoped("RequestDecision"))
+ {
+ m_Brain.RequestDecision(m_Info, sensors);
+ }
+
+ // If we have any DemonstrationWriters, write the AgentInfo and sensors to them.
+ if (DemonstrationWriters.Count != 0)
+ {
+ foreach (var demoWriter in DemonstrationWriters)
+ {
+ demoWriter.Record(m_Info, sensors);
+ }
+ }
+ }
+
+ void UpdateSensors()
+ {
+ foreach (var sensor in sensors)
+ {
+ sensor.Update();
+ }
+ }
+
+ void ResetSensors()
+ {
+ foreach (var sensor in sensors)
+ {
+ sensor.Reset();
+ }
+ }
+
+ ///
+ /// Implement `CollectObservations()` to collect the vector observations of
+ /// the agent for the step. The agent observation describes the current
+ /// environment from the perspective of the agent.
+ ///
+ ///
+ /// The vector observations for the agent.
+ ///
+ ///
+ /// An agent's observation is any environment information that helps
+ /// the agent achieve its goal. For example, for a fighting agent, its
+ /// observation could include distances to friends or enemies, or the
+ /// current level of ammunition at its disposal.
+ ///
+ /// You can use a combination of vector, visual, and raycast observations for an
+ /// agent. If you only use visual or raycast observations, you do not need to
+ /// implement a `CollectObservations()` function.
+ ///
+ /// Add vector observations to the sensor parameter passed to
+ /// this method by calling the VectorSensor helper methods:
+ /// - AddObservation(int)
+ /// - AddObservation(float)
+ /// - AddObservation(Vector3)
+ /// - AddObservation(Vector2)
+ /// - AddObservation(Quaternion)
+ /// - AddObservation(bool)
+ /// - AddObservation(IList<float>)
+ /// - AddOneHotObservation(int observation, int range)
+ ///
+ /// You can use any combination of these helper functions to build the agent's
+ /// vector of observations. You must build the vector in the same order
+ /// each time `CollectObservations()` is called and the length of the vector
+ /// must always be the same. In addition, the length of the observation must
+ /// match the BrainParameters.VectorObservationSize
+ /// attribute of the linked Brain, which is set in the Editor on the
+ /// **Behavior Parameters** component attached to the agent's [GameObject].
+ ///
+ /// For more information about observations, see [Observations and Sensors].
+ ///
+ /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
+ /// [Observations and Sensors]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Learning-Environment-Design-Agents.md#observations-and-sensors
+ ///
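+ ///
+ /// For illustration, a hedged sketch observing the agent's own position and a
+ /// target's position (the target field is an assumption for this example):
+ ///
+ /// public Transform target;
+ ///
+ /// public override void CollectObservations(VectorSensor sensor)
+ /// {
+ ///     sensor.AddObservation(transform.localPosition);
+ ///     sensor.AddObservation(target.localPosition);
+ /// }
+ ///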
+ public virtual void CollectObservations(VectorSensor sensor)
+ {
+ }
+
+ ///
+ /// Returns a read-only view of the observations that were generated in
+ /// CollectObservations(). This is mainly useful inside of a
+ /// Heuristic() method to avoid recomputing the observations.
+ ///
+ /// A read-only view of the observations list.
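+ ///
+ /// For example, a Heuristic() override might read back the values written in
+ /// CollectObservations() instead of recomputing them (the index used below is
+ /// an assumption about observation order):
+ ///
+ /// var obs = GetObservations();
+ /// float distanceToTarget = obs[0];
+ ///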
+ public ReadOnlyCollection<float> GetObservations()
+ {
+ return collectObservationsSensor.GetObservations();
+ }
+
+ ///
+ /// Returns a read-only view of the stacked observations that were generated in
+ /// CollectObservations(). This is mainly useful inside of a
+ /// Heuristic() method to avoid recomputing the observations.
+ ///
+ /// A read-only view of the stacked observations list.
+ public ReadOnlyCollection<float> GetStackedObservations()
+ {
+ return stackedCollectObservationsSensor.GetStackedObservations();
+ }
+
+ ///
+ /// Implement `WriteDiscreteActionMask()` to collect the masks for discrete
+ /// actions. When using discrete actions, the agent will not perform the masked
+ /// action.
+ ///
+ ///
+ /// The action mask for the agent.
+ ///
+ ///
+ /// When using Discrete Control, you can prevent the Agent from using a certain
+ /// action by masking it with SetActionEnabled().
+ ///
+ /// See [Agents - Actions] for more information on masking actions.
+ ///
+ /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Learning-Environment-Design-Agents.md#actions
+ ///
+ ///
+ public virtual void WriteDiscreteActionMask(IDiscreteActionMask actionMask) { }
+
+ ///
+ /// Implement `OnActionReceived()` to specify agent behavior at every step, based
+ /// on the provided action.
+ ///
+ ///
+ /// An action is passed to this function in the form of an ActionBuffers.
+ /// Your implementation must use the array to direct the agent's behavior for the
+ /// current step.
+ ///
+ /// You decide how many elements you need in the ActionBuffers to control your
+ /// agent and what each element means. For example, if you want to apply a
+ /// force to move an agent around the environment, you can arbitrarily pick
+ /// three values in ActionBuffers.ContinuousActions array to use as the force components.
+ /// During training, the agent's policy learns to set those particular elements of
+ /// the array to maximize the training rewards the agent receives. (Of course,
+ /// if you implement a Heuristic() function, it must use the same
+ /// elements of the action array for the same purpose since there is no learning
+ /// involved.)
+ ///
+ /// An Agent can use continuous and/or discrete actions. Configure this along with the size
+ /// of the action array, in the BrainParameters of the agent's associated
+ /// BehaviorParameters component.
+ ///
+ /// When an agent uses continuous actions, the values in the ActionBuffers.ContinuousActions
+ /// array are floating point numbers. You should clamp the values to the range
+ /// -1..1, to increase numerical stability during training.
+ ///
+ /// When an agent uses discrete actions, the values in the ActionBuffers.DiscreteActions array
+ /// are integers that each represent a specific, discrete action. For example,
+ /// you could define a set of discrete actions such as:
+ ///
+ ///
+ /// 0 = Do nothing
+ /// 1 = Move one space left
+ /// 2 = Move one space right
+ /// 3 = Move one space up
+ /// 4 = Move one space down
+ ///
+ ///
+ /// When making a decision, the agent picks one of the five actions and puts the
+ /// corresponding integer value in the ActionBuffers.DiscreteActions array. For example, if the agent
+ /// decided to move left, the ActionBuffers.DiscreteActions parameter would be an array with
+ /// a single element with the value 1.
+ ///
+ /// You can define multiple sets, or branches, of discrete actions to allow an
+ /// agent to perform simultaneous, independent actions. For example, you could
+ /// use one branch for movement and another branch for throwing a ball left, right,
+ /// up, or down, to allow the agent to do both in the same step.
+ ///
+ /// The ActionBuffers.DiscreteActions array of an agent with discrete actions contains one
+ /// element for each branch. The value of each element is the integer representing the
+ /// chosen action for that branch. The agent always chooses one action for each branch.
+ ///
+ /// When you use the discrete actions, you can prevent the training process
+ /// or the neural network model from choosing specific actions in a step by
+ /// implementing the WriteDiscreteActionMask()
+ /// method. For example, if your agent is next to a wall, you could mask out any
+ /// actions that would result in the agent trying to move into the wall.
+ ///
+ /// For more information about implementing agent actions see [Agents - Actions].
+ ///
+ /// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs/Learning-Environment-Design-Agents.md#actions
+ ///
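+ ///
+ /// A hedged sketch for an agent with two continuous actions that applies a
+ /// force (m_Rigidbody and moveSpeed are assumptions for this example):
+ ///
+ /// public override void OnActionReceived(ActionBuffers actions)
+ /// {
+ ///     var force = new Vector3(actions.ContinuousActions[0], 0f, actions.ContinuousActions[1]);
+ ///     m_Rigidbody.AddForce(force * moveSpeed);
+ /// }
+ ///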
+ ///
+ /// Struct containing the buffers of actions to be executed at this step.
+ ///
+ public virtual void OnActionReceived(ActionBuffers actions) { }
+
+ ///
+ /// Implement `OnEpisodeBegin()` to set up an Agent instance at the beginning
+ /// of an episode.
+ ///
+ ///
+ ///
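+ ///
+ /// A minimal sketch that re-randomizes the agent's start position each episode
+ /// (the spawn range is an arbitrary example):
+ ///
+ /// public override void OnEpisodeBegin()
+ /// {
+ ///     transform.localPosition = new Vector3(Random.Range(-4f, 4f), 0.5f, Random.Range(-4f, 4f));
+ /// }
+ ///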
+ public virtual void OnEpisodeBegin() { }
+
+ ///
+ /// Gets the most recent ActionBuffer for this agent.
+ ///
+ /// The most recent ActionBuffer for this agent
+ public ActionBuffers GetStoredActionBuffers()
+ {
+ return m_ActuatorManager.StoredActions;
+ }
+
+ ///
+ /// An internal reset method that updates internal data structures in
+ /// addition to calling .
+ ///
+ void _AgentReset()
+ {
+ ResetData();
+ m_StepCount = 0;
+ using (m_OnEpisodeBeginChecker.Start())
+ {
+ OnEpisodeBegin();
+ }
+ }
+
+ ///
+ /// Scales continuous action from [-1, 1] to arbitrary range.
+ ///
+ /// The input action value.
+ /// The minimum output value.
+ /// The maximum output value.
+ /// The rawAction scaled from [-1,1] to
+ /// [min, max].
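+ ///
+ /// For example, ScaleAction(0.5f, 0f, 10f) returns 7.5f: the midpoint is 5,
+ /// the half-range is 5, and 0.5 * 5 + 5 = 7.5.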
+ protected static float ScaleAction(float rawAction, float min, float max)
+ {
+ var middle = (min + max) / 2;
+ var range = (max - min) / 2;
+ return rawAction * range + middle;
+ }
+
+ ///
+ /// Signals the agent that it must send its decision to the brain.
+ ///
+ void SendInfo()
+ {
+ // If the Agent is done, it has just reset and thus requires a new decision
+ if (m_RequestDecision)
+ {
+ SendInfoToBrain();
+ m_Reward = 0f;
+ m_GroupReward = 0f;
+ m_RequestDecision = false;
+ }
+ }
+
+ void AgentIncrementStep()
+ {
+ m_StepCount += 1;
+ }
+
+ /// Used by the brain to make the agent perform a step.
+ void AgentStep()
+ {
+ if ((m_RequestAction) && (m_Brain != null))
+ {
+ m_RequestAction = false;
+ m_ActuatorManager.ExecuteActions();
+ }
+
+ if ((m_StepCount >= MaxStep) && (MaxStep > 0))
+ {
+ NotifyAgentDone(DoneReason.MaxStepReached);
+ _AgentReset();
+ }
+ }
+
+ void DecideAction()
+ {
+ if (m_ActuatorManager.StoredActions.ContinuousActions.Array == null)
+ {
+ ResetData();
+ }
+ var actions = m_Brain?.DecideAction() ?? new ActionBuffers();
+ m_Info.CopyActions(actions);
+ m_ActuatorManager.UpdateActions(actions);
+ }
+
+ internal void SetMultiAgentGroup(IMultiAgentGroup multiAgentGroup)
+ {
+ if (multiAgentGroup == null)
+ {
+ m_GroupId = 0;
+ }
+ else
+ {
+ var newGroupId = multiAgentGroup.GetId();
+ if (m_GroupId == 0 || m_GroupId == newGroupId)
+ {
+ m_GroupId = newGroupId;
+ }
+ else
+ {
+ throw new UnityAgentsException("Agent is already registered with a group. Unregister it first.");
+ }
+ }
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Agent.cs.meta b/com.unity.ml-agents/Runtime/Agent.cs.meta
new file mode 100644
index 0000000000..5463d244fb
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Agent.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 88b6042bc9a5d4aa58d931eae49442e5
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Analytics.meta b/com.unity.ml-agents/Runtime/Analytics.meta
new file mode 100644
index 0000000000..260b85a9b3
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Analytics.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 8b12ac54c5224758af88c67e2af4a01e
+timeCreated: 1604359666
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Analytics/AnalyticsUtils.cs b/com.unity.ml-agents/Runtime/Analytics/AnalyticsUtils.cs
new file mode 100644
index 0000000000..b206f6bd98
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Analytics/AnalyticsUtils.cs
@@ -0,0 +1,66 @@
+using System;
+using System.Text;
+using System.Security.Cryptography;
+using UnityEngine;
+
+namespace Unity.MLAgents.Analytics
+{
+
+ internal static class AnalyticsUtils
+ {
+ ///
+ /// Conversion function from byte array to hex string
+ ///
+ ///
+ /// A byte array to be hex encoded.
+ private static string ToHexString(byte[] array)
+ {
+ StringBuilder hex = new StringBuilder(array.Length * 2);
+ foreach (byte b in array)
+ {
+ hex.AppendFormat("{0:x2}", b);
+ }
+ return hex.ToString();
+ }
+
+ ///
+ /// Hash a string to remove PII or secret info before sending to analytics
+ ///
+ ///
+ /// A string containing the key to be used for HMAC encoding.
+ ///
+ /// A string containing the value to be encoded.
+ public static string Hash(string key, string value)
+ {
+ string hash;
+ UTF8Encoding encoder = new UTF8Encoding();
+ using (HMACSHA256 hmac = new HMACSHA256(encoder.GetBytes(key)))
+ {
+ Byte[] hmBytes = hmac.ComputeHash(encoder.GetBytes(value));
+ hash = ToHexString(hmBytes);
+ }
+ return hash;
+ }
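+
+ // Illustrative usage (the key and value names here are assumptions, not real call sites):
+ //   string hashed = AnalyticsUtils.Hash(someKey, behaviorName);
+ // yields a stable lowercase hex HMAC-SHA256 digest, so the raw name is never sent.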
+
+ internal static bool s_SendEditorAnalytics = true;
+
+ ///
+ /// Helper class to temporarily disable sending analytics from unit tests.
+ ///
+ internal class DisableAnalyticsSending : IDisposable
+ {
+ private bool m_PreviousSendEditorAnalytics;
+
+ public DisableAnalyticsSending()
+ {
+ m_PreviousSendEditorAnalytics = s_SendEditorAnalytics;
+ s_SendEditorAnalytics = false;
+ }
+
+ public void Dispose()
+ {
+ s_SendEditorAnalytics = m_PreviousSendEditorAnalytics;
+ }
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Analytics/AnalyticsUtils.cs.meta b/com.unity.ml-agents/Runtime/Analytics/AnalyticsUtils.cs.meta
new file mode 100644
index 0000000000..b00fab1c90
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Analytics/AnalyticsUtils.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: af1ef3e70f1242938d7b39284b1a892b
+timeCreated: 1610575760
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Analytics/Events.cs b/com.unity.ml-agents/Runtime/Analytics/Events.cs
new file mode 100644
index 0000000000..4a34273c04
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Analytics/Events.cs
@@ -0,0 +1,194 @@
+using System;
+using System.Collections.Generic;
+using Unity.MLAgents.Actuators;
+using Unity.MLAgents.Sensors;
+
+namespace Unity.MLAgents.Analytics
+{
+ internal struct InferenceEvent
+ {
+ /// <summary>
+ /// Hash of the BehaviorName.
+ /// </summary>
+ public string BehaviorName;
+ public string BarracudaModelSource;
+ public string BarracudaModelVersion;
+ public string BarracudaModelProducer;
+ public string BarracudaPackageVersion;
+ /// <summary>
+ /// Whether inference is performed on CPU (0) or GPU (1).
+ /// </summary>
+ public int InferenceDevice;
+ public List<EventObservationSpec> ObservationSpecs;
+ public EventActionSpec ActionSpec;
+ public List<EventActuatorInfo> ActuatorInfos;
+ public int MemorySize;
+ public long TotalWeightSizeBytes;
+ public string ModelHash;
+ }
+
+ /// <summary>
+ /// Simplified version of ActionSpec struct for use in analytics
+ /// </summary>
+ [Serializable]
+ internal struct EventActionSpec
+ {
+ public int NumContinuousActions;
+ public int NumDiscreteActions;
+ public int[] BranchSizes;
+
+ public static EventActionSpec FromActionSpec(ActionSpec actionSpec)
+ {
+ var branchSizes = actionSpec.BranchSizes ?? Array.Empty<int>();
+ return new EventActionSpec
+ {
+ NumContinuousActions = actionSpec.NumContinuousActions,
+ NumDiscreteActions = actionSpec.NumDiscreteActions,
+ BranchSizes = branchSizes,
+ };
+ }
+ }
+
+ /// <summary>
+ /// Information about an actuator.
+ /// </summary>
+ [Serializable]
+ internal struct EventActuatorInfo
+ {
+ public int BuiltInActuatorType;
+ public int NumContinuousActions;
+ public int NumDiscreteActions;
+
+ public static EventActuatorInfo FromActuator(IActuator actuator)
+ {
+ BuiltInActuatorType builtInActuatorType = Actuators.BuiltInActuatorType.Unknown;
+ if (actuator is IBuiltInActuator builtInActuator)
+ {
+ builtInActuatorType = builtInActuator.GetBuiltInActuatorType();
+ }
+
+ var actionSpec = actuator.ActionSpec;
+
+ return new EventActuatorInfo
+ {
+ BuiltInActuatorType = (int)builtInActuatorType,
+ NumContinuousActions = actionSpec.NumContinuousActions,
+ NumDiscreteActions = actionSpec.NumDiscreteActions
+ };
+ }
+ }
+
+ /// <summary>
+ /// Information about one dimension of an observation.
+ /// </summary>
+ [Serializable]
+ internal struct EventObservationDimensionInfo
+ {
+ public int Size;
+ public int Flags;
+ }
+
+ /// <summary>
+ /// Simplified summary of Agent observations for use in analytics
+ /// </summary>
+ [Serializable]
+ internal struct EventObservationSpec
+ {
+ public string SensorName;
+ public string CompressionType;
+ public int BuiltInSensorType;
+ public int ObservationType;
+ public EventObservationDimensionInfo[] DimensionInfos;
+
+ public static EventObservationSpec FromSensor(ISensor sensor)
+ {
+ var obsSpec = sensor.GetObservationSpec();
+ var shape = obsSpec.Shape;
+ var dimProps = obsSpec.DimensionProperties;
+ var dimInfos = new EventObservationDimensionInfo[shape.Length];
+ for (var i = 0; i < shape.Length; i++)
+ {
+ dimInfos[i].Size = shape[i];
+ dimInfos[i].Flags = (int)dimProps[i];
+ }
+
+ var builtInSensorType =
+ (sensor as IBuiltInSensor)?.GetBuiltInSensorType() ?? Sensors.BuiltInSensorType.Unknown;
+
+ return new EventObservationSpec
+ {
+ SensorName = sensor.GetName(),
+ CompressionType = sensor.GetCompressionSpec().SensorCompressionType.ToString(),
+ BuiltInSensorType = (int)builtInSensorType,
+ ObservationType = (int)obsSpec.ObservationType,
+ DimensionInfos = dimInfos,
+ };
+ }
+ }
+
+ internal struct RemotePolicyInitializedEvent
+ {
+ public string TrainingSessionGuid;
+ /// <summary>
+ /// Hash of the BehaviorName.
+ /// </summary>
+ public string BehaviorName;
+ public List<EventObservationSpec> ObservationSpecs;
+ public EventActionSpec ActionSpec;
+ public List<EventActuatorInfo> ActuatorInfos;
+
+ /// <summary>
+ /// This will be the same as TrainingEnvironmentInitializedEvent if available, but
+ /// TrainingEnvironmentInitializedEvent may not always be available with older trainers.
+ /// </summary>
+ public string MLAgentsEnvsVersion;
+ public string TrainerCommunicationVersion;
+ }
+
+ internal struct TrainingEnvironmentInitializedEvent
+ {
+ public string TrainingSessionGuid;
+
+ public string TrainerPythonVersion;
+ public string MLAgentsVersion;
+ public string MLAgentsEnvsVersion;
+ public string TorchVersion;
+ public string TorchDeviceType;
+ public int NumEnvironments;
+ public int NumEnvironmentParameters;
+ public string RunOptions;
+ }
+
+ [Flags]
+ internal enum RewardSignals
+ {
+ Extrinsic = 1 << 0,
+ Gail = 1 << 1,
+ Curiosity = 1 << 2,
+ Rnd = 1 << 3,
+ }
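+
+ // For illustration: these are bit flags, so multiple reward signals combine with
+ // bitwise OR, e.g. (RewardSignals.Extrinsic | RewardSignals.Curiosity) has the
+ // integer value 5 (1 | 4).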
+
+ [Flags]
+ internal enum TrainingFeatures
+ {
+ BehavioralCloning = 1 << 0,
+ Recurrent = 1 << 1,
+ Threaded = 1 << 2,
+ SelfPlay = 1 << 3,
+ Curriculum = 1 << 4,
+ }
+
+ internal struct TrainingBehaviorInitializedEvent
+ {
+ public string TrainingSessionGuid;
+
+ public string BehaviorName;
+ public string TrainerType;
+ public RewardSignals RewardSignalFlags;
+ public TrainingFeatures TrainingFeatureFlags;
+ public string VisualEncoder;
+ public int NumNetworkLayers;
+ public int NumNetworkHiddenUnits;
+ public string Config;
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Analytics/Events.cs.meta b/com.unity.ml-agents/Runtime/Analytics/Events.cs.meta
new file mode 100644
index 0000000000..347eebcd51
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Analytics/Events.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 0a0d7cda6d74425a80775769a9283ba6
+timeCreated: 1604359798
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Analytics/InferenceAnalytics.cs b/com.unity.ml-agents/Runtime/Analytics/InferenceAnalytics.cs
new file mode 100644
index 0000000000..b7b466155a
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Analytics/InferenceAnalytics.cs
@@ -0,0 +1,283 @@
+using System.Collections.Generic;
+using System.Diagnostics;
+using Unity.Barracuda;
+using Unity.MLAgents.Actuators;
+using Unity.MLAgents.Inference;
+using Unity.MLAgents.Policies;
+using Unity.MLAgents.Sensors;
+using UnityEngine;
+
+#if MLA_UNITY_ANALYTICS_MODULE && ENABLE_CLOUD_SERVICES_ANALYTICS
+using UnityEngine.Analytics;
+#endif
+
+
+#if UNITY_EDITOR
+using UnityEditor;
+#if MLA_UNITY_ANALYTICS_MODULE
+using UnityEditor.Analytics;
+#endif // MLA_UNITY_ANALYTICS_MODULE
+#endif // UNITY_EDITOR
+
+
+namespace Unity.MLAgents.Analytics
+{
+ internal class InferenceAnalytics
+ {
+ const string k_VendorKey = "unity.ml-agents";
+ const string k_EventName = "ml_agents_inferencemodelset";
+ const int k_EventVersion = 1;
+
+ /// <summary>
+ /// Whether or not we've registered this particular event yet
+ /// </summary>
+ static bool s_EventRegistered;
+
+ /// <summary>
+ /// Hourly limit for this event name
+ /// </summary>
+ const int k_MaxEventsPerHour = 1000;
+
+ /// <summary>
+ /// Maximum number of items in this event.
+ /// </summary>
+ const int k_MaxNumberOfElements = 1000;
+
+
+#if UNITY_EDITOR && MLA_UNITY_ANALYTICS_MODULE && ENABLE_CLOUD_SERVICES_ANALYTICS
+ /// <summary>
+ /// Models that we've already sent events for.
+ /// </summary>
+ private static HashSet<NNModel> s_SentModels;
+#endif
+
+ static bool EnableAnalytics()
+ {
+#if UNITY_EDITOR && MLA_UNITY_ANALYTICS_MODULE && ENABLE_CLOUD_SERVICES_ANALYTICS
+ if (s_EventRegistered)
+ {
+ return true;
+ }
+
+ AnalyticsResult result = EditorAnalytics.RegisterEventWithLimit(k_EventName, k_MaxEventsPerHour, k_MaxNumberOfElements, k_VendorKey, k_EventVersion);
+ if (result == AnalyticsResult.Ok)
+ {
+ s_EventRegistered = true;
+ }
+ if (s_EventRegistered && s_SentModels == null)
+ {
+ s_SentModels = new HashSet<NNModel>();
+ }
+
+#else // no editor, no analytics
+ s_EventRegistered = false;
+#endif
+ return s_EventRegistered;
+ }
+
+ public static bool IsAnalyticsEnabled()
+ {
+#if UNITY_EDITOR
+ return EditorAnalytics.enabled;
+#else
+ return false;
+#endif
+ }
+
+ /// <summary>
+ /// Send an analytics event for the NNModel when it is set up for inference.
+ /// No events will be sent if analytics are disabled, and at most one event
+ /// will be sent per model instance.
+ /// </summary>
+ /// <param name="nnModel">The NNModel being used for inference.</param>
+ /// <param name="behaviorName">The BehaviorName of the Agent using the model</param>
+ /// <param name="inferenceDevice">Whether inference is being performed on the CPU or GPU</param>
+ /// <param name="sensors">List of ISensors for the Agent. Used to generate information about the observation space.</param>
+ /// <param name="actionSpec">ActionSpec for the Agent. Used to generate information about the action space.</param>
+ /// <param name="actuators">List of IActuators for the Agent. Used to generate information about the action space.</param>
+ [Conditional("MLA_UNITY_ANALYTICS_MODULE")]
+ public static void InferenceModelSet(
+ NNModel nnModel,
+ string behaviorName,
+ InferenceDevice inferenceDevice,
+ IList<ISensor> sensors,
+ ActionSpec actionSpec,
+ IList<IActuator> actuators
+ )
+ {
+#if UNITY_EDITOR && MLA_UNITY_ANALYTICS_MODULE && ENABLE_CLOUD_SERVICES_ANALYTICS
+ // The event shouldn't be able to report if this is disabled, but if we know we're not going to report,
+ // let's early out and not waste time gathering all the data.
+ if (!IsAnalyticsEnabled())
+ return;
+
+ if (!EnableAnalytics())
+ return;
+
+ var added = s_SentModels.Add(nnModel);
+
+ if (!added)
+ {
+ // We previously added this model. Exit so we don't resend.
+ return;
+ }
+
+ var data = GetEventForModel(nnModel, behaviorName, inferenceDevice, sensors, actionSpec, actuators);
+ // Note - to debug, use JsonUtility.ToJson on the event.
+ // Debug.Log(JsonUtility.ToJson(data, true));
+ if (AnalyticsUtils.s_SendEditorAnalytics)
+ {
+ EditorAnalytics.SendEventWithLimit(k_EventName, data, k_EventVersion);
+ }
+#endif
+ }
+
+ /// <summary>
+ /// Generate an InferenceEvent for the model.
+ /// </summary>
+ /// <param name="nnModel"></param>
+ /// <param name="behaviorName"></param>
+ /// <param name="inferenceDevice"></param>
+ /// <param name="sensors"></param>
+ /// <param name="actionSpec"></param>
+ /// <param name="actuators"></param>
+ /// <returns></returns>
+ internal static InferenceEvent GetEventForModel(
+ NNModel nnModel,
+ string behaviorName,
+ InferenceDevice inferenceDevice,
+ IList<ISensor> sensors,
+ ActionSpec actionSpec,
+ IList<IActuator> actuators
+ )
+ {
+ var barracudaModel = ModelLoader.Load(nnModel);
+ var inferenceEvent = new InferenceEvent();
+
+ // Hash the behavior name so that there's no concern about PII or "secret" data being leaked.
+ inferenceEvent.BehaviorName = AnalyticsUtils.Hash(k_VendorKey, behaviorName);
+
+ inferenceEvent.BarracudaModelSource = barracudaModel.IrSource;
+ inferenceEvent.BarracudaModelVersion = barracudaModel.IrVersion;
+ inferenceEvent.BarracudaModelProducer = barracudaModel.ProducerName;
+ inferenceEvent.MemorySize = (int)barracudaModel.GetTensorByName(TensorNames.MemorySize)[0];
+ inferenceEvent.InferenceDevice = (int)inferenceDevice;
+
+ if (barracudaModel.ProducerName == "Script")
+ {
+ // .nn files don't have these fields set correctly. Assign some placeholder values.
+ inferenceEvent.BarracudaModelSource = "NN";
+ inferenceEvent.BarracudaModelProducer = "tensorflow_to_barracuda.py";
+ }
+
+#if UNITY_EDITOR
+ var barracudaPackageInfo = UnityEditor.PackageManager.PackageInfo.FindForAssembly(typeof(Tensor).Assembly);
+ inferenceEvent.BarracudaPackageVersion = barracudaPackageInfo.version;
+#else
+ inferenceEvent.BarracudaPackageVersion = null;
+#endif
+
+ inferenceEvent.ActionSpec = EventActionSpec.FromActionSpec(actionSpec);
+ inferenceEvent.ObservationSpecs = new List<EventObservationSpec>(sensors.Count);
+ foreach (var sensor in sensors)
+ {
+ inferenceEvent.ObservationSpecs.Add(EventObservationSpec.FromSensor(sensor));
+ }
+
+ inferenceEvent.ActuatorInfos = new List<EventActuatorInfo>(actuators.Count);
+ foreach (var actuator in actuators)
+ {
+ inferenceEvent.ActuatorInfos.Add(EventActuatorInfo.FromActuator(actuator));
+ }
+
+ inferenceEvent.TotalWeightSizeBytes = GetModelWeightSize(barracudaModel);
+ inferenceEvent.ModelHash = GetModelHash(barracudaModel);
+ return inferenceEvent;
+ }
+
+ /// <summary>
+ /// Compute the total model weight size in bytes.
+ /// This corresponds to the "Total weight size" display in the Barracuda inspector,
+ /// and the calculations are the same.
+ /// </summary>
+ /// <param name="barracudaModel"></param>
+ /// <returns></returns>
+ static long GetModelWeightSize(Model barracudaModel)
+ {
+ long totalWeightsSizeInBytes = 0;
+ for (var l = 0; l < barracudaModel.layers.Count; ++l)
+ {
+ for (var d = 0; d < barracudaModel.layers[l].datasets.Length; ++d)
+ {
+ totalWeightsSizeInBytes += barracudaModel.layers[l].datasets[d].length;
+ }
+ }
+ return totalWeightsSizeInBytes;
+ }
+
+ /// <summary>
+ /// Wrapper around Hash128 that supports Append(float[], int, int)
+ /// </summary>
+ struct MLAgentsHash128
+ {
+ private Hash128 m_Hash;
+
+ public void Append(float[] values, int count)
+ {
+ if (values == null)
+ {
+ return;
+ }
+
+ // Pre-2020 versions of Unity don't have Hash128.Append() (can only hash strings and scalars)
+ // For these versions, we'll hash element by element.
+#if UNITY_2020_1_OR_NEWER
+ m_Hash.Append(values, 0, count);
+#else
+ for (var i = 0; i < count; i++)
+ {
+ var tempHash = new Hash128();
+ HashUtilities.ComputeHash128(ref values[i], ref tempHash);
+ HashUtilities.AppendHash(ref tempHash, ref m_Hash);
+ }
+#endif
+ }
+
+ public void Append(string value)
+ {
+ var tempHash = Hash128.Compute(value);
+ HashUtilities.AppendHash(ref tempHash, ref m_Hash);
+ }
+
+ public override string ToString()
+ {
+ return m_Hash.ToString();
+ }
+ }
+
+ /// <summary>
+ /// Compute a hash of the model's layer data and return it as a string.
+ /// A subset of the layer weights are used for performance.
+ /// This increases the chance of a collision, but this should still be extremely rare.
+ /// </summary>
+ /// <param name="barracudaModel"></param>
+ /// <returns></returns>
+ static string GetModelHash(Model barracudaModel)
+ {
+ var hash = new MLAgentsHash128();
+
+ // Limit the max number of float bytes that we hash for performance.
+ const int kMaxFloats = 256;
+
+ foreach (var layer in barracudaModel.layers)
+ {
+ hash.Append(layer.name);
+ var numFloatsToHash = Mathf.Min(layer.weights.Length, kMaxFloats);
+ hash.Append(layer.weights, numFloatsToHash);
+ }
+
+ return hash.ToString();
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Analytics/InferenceAnalytics.cs.meta b/com.unity.ml-agents/Runtime/Analytics/InferenceAnalytics.cs.meta
new file mode 100644
index 0000000000..e81b2ecbb6
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Analytics/InferenceAnalytics.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: ac4c40c2394d481ebf602caa600a32f3
+timeCreated: 1604359787
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Analytics/TrainingAnalytics.cs b/com.unity.ml-agents/Runtime/Analytics/TrainingAnalytics.cs
new file mode 100644
index 0000000000..08c205bfc6
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Analytics/TrainingAnalytics.cs
@@ -0,0 +1,276 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using Unity.MLAgents.Actuators;
+using Unity.MLAgents.Sensors;
+using UnityEngine;
+#if MLA_UNITY_ANALYTICS_MODULE
+
+#if ENABLE_CLOUD_SERVICES_ANALYTICS
+using UnityEngine.Analytics;
+#endif
+
+#if UNITY_EDITOR
+using UnityEditor.Analytics;
+#endif
+#endif
+
+#if UNITY_EDITOR
+using UnityEditor;
+#endif
+
+namespace Unity.MLAgents.Analytics
+{
+ internal static class TrainingAnalytics
+ {
+ const string k_VendorKey = "unity.ml-agents";
+ const string k_TrainingEnvironmentInitializedEventName = "ml_agents_training_environment_initialized";
+ const string k_TrainingBehaviorInitializedEventName = "ml_agents_training_behavior_initialized";
+ const string k_RemotePolicyInitializedEventName = "ml_agents_remote_policy_initialized";
+
+ private static readonly string[] s_EventNames =
+ {
+ k_TrainingEnvironmentInitializedEventName,
+ k_TrainingBehaviorInitializedEventName,
+ k_RemotePolicyInitializedEventName
+ };
+
+ /// <summary>
+ /// Hourly limit for this event name
+ /// </summary>
+ const int k_MaxEventsPerHour = 1000;
+
+ /// <summary>
+ /// Maximum number of items in this event.
+ /// </summary>
+ const int k_MaxNumberOfElements = 1000;
+
+ private static bool s_SentEnvironmentInitialized;
+
+#if UNITY_EDITOR && MLA_UNITY_ANALYTICS_MODULE && ENABLE_CLOUD_SERVICES_ANALYTICS
+ /// <summary>
+ /// Whether or not we've registered this particular event yet
+ /// </summary>
+ static bool s_EventsRegistered;
+
+ /// <summary>
+ /// Behaviors that we've already sent events for.
+ /// </summary>
+ private static HashSet<string> s_SentRemotePolicyInitialized;
+ private static HashSet<string> s_SentTrainingBehaviorInitialized;
+#endif
+
+ private static Guid s_TrainingSessionGuid;
+
+ // These are set when the RpcCommunicator connects
+ private static string s_TrainerPackageVersion = "";
+ private static string s_TrainerCommunicationVersion = "";
+
+ internal static bool EnableAnalytics()
+ {
+#if UNITY_EDITOR && MLA_UNITY_ANALYTICS_MODULE && ENABLE_CLOUD_SERVICES_ANALYTICS
+ if (s_EventsRegistered)
+ {
+ return true;
+ }
+ foreach (var eventName in s_EventNames)
+ {
+ AnalyticsResult result = EditorAnalytics.RegisterEventWithLimit(eventName, k_MaxEventsPerHour, k_MaxNumberOfElements, k_VendorKey);
+ if (result != AnalyticsResult.Ok)
+ {
+ return false;
+ }
+ }
+ s_EventsRegistered = true;
+
+ if (s_SentRemotePolicyInitialized == null)
+ {
+ s_SentRemotePolicyInitialized = new HashSet<string>();
+ s_SentTrainingBehaviorInitialized = new HashSet<string>();
+ s_TrainingSessionGuid = Guid.NewGuid();
+ }
+
+ return s_EventsRegistered;
+#else
+ return false;
+#endif // MLA_UNITY_ANALYTICS_MODULE
+ }
+
+ /// <summary>
+ /// Cache information about the trainer when it becomes available in the RpcCommunicator.
+ /// </summary>
+ /// <param name="packageVersion"></param>
+ /// <param name="communicationVersion"></param>
+ [Conditional("MLA_UNITY_ANALYTICS_MODULE")]
+ public static void SetTrainerInformation(string packageVersion, string communicationVersion)
+ {
+ s_TrainerPackageVersion = packageVersion;
+ s_TrainerCommunicationVersion = communicationVersion;
+ }
+
+ public static bool IsAnalyticsEnabled()
+ {
+#if UNITY_EDITOR && MLA_UNITY_ANALYTICS_MODULE && ENABLE_CLOUD_SERVICES_ANALYTICS
+ return EditorAnalytics.enabled;
+#else
+ return false;
+#endif
+ }
+
+ [Conditional("MLA_UNITY_ANALYTICS_MODULE")]
+ public static void TrainingEnvironmentInitialized(TrainingEnvironmentInitializedEvent tbiEvent)
+ {
+ if (!IsAnalyticsEnabled())
+ return;
+
+ if (!EnableAnalytics())
+ return;
+
+ if (s_SentEnvironmentInitialized)
+ {
+ // We already sent a TrainingEnvironmentInitializedEvent. Exit so we don't resend.
+ return;
+ }
+
+ s_SentEnvironmentInitialized = true;
+ tbiEvent.TrainingSessionGuid = s_TrainingSessionGuid.ToString();
+
+ // Note - to debug, use JsonUtility.ToJson on the event.
+ // Debug.Log(
+ // $"Would send event {k_TrainingEnvironmentInitializedEventName} with body {JsonUtility.ToJson(tbiEvent, true)}"
+ // );
+#if UNITY_EDITOR && MLA_UNITY_ANALYTICS_MODULE && ENABLE_CLOUD_SERVICES_ANALYTICS
+ if (AnalyticsUtils.s_SendEditorAnalytics)
+ {
+ EditorAnalytics.SendEventWithLimit(k_TrainingEnvironmentInitializedEventName, tbiEvent);
+ }
+#endif
+ }
+
+ [Conditional("MLA_UNITY_ANALYTICS_MODULE")]
+ public static void RemotePolicyInitialized(
+ string fullyQualifiedBehaviorName,
+ IList<ISensor> sensors,
+ ActionSpec actionSpec,
+ IList<IActuator> actuators
+ )
+ {
+#if UNITY_EDITOR && MLA_UNITY_ANALYTICS_MODULE && ENABLE_CLOUD_SERVICES_ANALYTICS
+ if (!IsAnalyticsEnabled())
+ return;
+
+ if (!EnableAnalytics())
+ return;
+
+ // Extract base behavior name (no team ID)
+ var behaviorName = ParseBehaviorName(fullyQualifiedBehaviorName);
+ var added = s_SentRemotePolicyInitialized.Add(behaviorName);
+
+ if (!added)
+ {
+ // We previously added this behavior. Exit so we don't resend.
+ return;
+ }
+
+ var data = GetEventForRemotePolicy(behaviorName, sensors, actionSpec, actuators);
+ // Note - to debug, use JsonUtility.ToJson on the event.
+ // Debug.Log(
+ // $"Would send event {k_RemotePolicyInitializedEventName} with body {JsonUtility.ToJson(data, true)}"
+ // );
+ if (AnalyticsUtils.s_SendEditorAnalytics)
+ {
+ EditorAnalytics.SendEventWithLimit(k_RemotePolicyInitializedEventName, data);
+ }
+#endif
+ }
+
+ internal static string ParseBehaviorName(string fullyQualifiedBehaviorName)
+ {
+ var lastQuestionIndex = fullyQualifiedBehaviorName.LastIndexOf("?");
+ if (lastQuestionIndex < 0)
+ {
+ // Nothing to remove
+ return fullyQualifiedBehaviorName;
+ }
+
+ return fullyQualifiedBehaviorName.Substring(0, lastQuestionIndex);
+ }
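+
+ // For example (name purely illustrative): "MyBehavior?team=0" parses to
+ // "MyBehavior", while a name containing no '?' is returned unchanged.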
+
+ internal static TrainingBehaviorInitializedEvent SanitizeTrainingBehaviorInitializedEvent(TrainingBehaviorInitializedEvent tbiEvent)
+ {
+ // Hash the behavior name if the message version is from an older version of ml-agents that doesn't do trainer-side hashing.
+ // We'll also, for extra safety, verify that the BehaviorName is the size of the expected SHA256 hash.
+ // Context: The config field was added at the same time as trainer side hashing, so messages including it should already be hashed.
+ if (tbiEvent.Config.Length == 0 || tbiEvent.BehaviorName.Length != 64)
+ {
+ tbiEvent.BehaviorName = AnalyticsUtils.Hash(k_VendorKey, tbiEvent.BehaviorName);
+ }
+
+ return tbiEvent;
+ }
+
+ [Conditional("MLA_UNITY_ANALYTICS_MODULE")]
+ public static void TrainingBehaviorInitialized(TrainingBehaviorInitializedEvent rawTbiEvent)
+ {
+#if UNITY_EDITOR && MLA_UNITY_ANALYTICS_MODULE && ENABLE_CLOUD_SERVICES_ANALYTICS
+ if (!IsAnalyticsEnabled())
+ return;
+
+ if (!EnableAnalytics())
+ return;
+
+ var tbiEvent = SanitizeTrainingBehaviorInitializedEvent(rawTbiEvent);
+ var behaviorName = tbiEvent.BehaviorName;
+ var added = s_SentTrainingBehaviorInitialized.Add(behaviorName);
+
+ if (!added)
+ {
+ // We previously added this behavior. Exit so we don't resend.
+ return;
+ }
+
+ tbiEvent.TrainingSessionGuid = s_TrainingSessionGuid.ToString();
+
+ // Note - to debug, use JsonUtility.ToJson on the event.
+ // Debug.Log(
+ // $"Would send event {k_TrainingBehaviorInitializedEventName} with body {JsonUtility.ToJson(tbiEvent, true)}"
+ // );
+ if (AnalyticsUtils.s_SendEditorAnalytics)
+ {
+ EditorAnalytics.SendEventWithLimit(k_TrainingBehaviorInitializedEventName, tbiEvent);
+ }
+#endif
+ }
+
+ internal static RemotePolicyInitializedEvent GetEventForRemotePolicy(
+ string behaviorName,
+ IList<ISensor> sensors,
+ ActionSpec actionSpec,
+ IList<IActuator> actuators
+ )
+ {
+ var remotePolicyEvent = new RemotePolicyInitializedEvent();
+
+ // Hash the behavior name so that there's no concern about PII or "secret" data being leaked.
+ remotePolicyEvent.BehaviorName = AnalyticsUtils.Hash(k_VendorKey, behaviorName);
+
+ remotePolicyEvent.TrainingSessionGuid = s_TrainingSessionGuid.ToString();
+ remotePolicyEvent.ActionSpec = EventActionSpec.FromActionSpec(actionSpec);
+ remotePolicyEvent.ObservationSpecs = new List<EventObservationSpec>(sensors.Count);
+ foreach (var sensor in sensors)
+ {
+ remotePolicyEvent.ObservationSpecs.Add(EventObservationSpec.FromSensor(sensor));
+ }
+
+ remotePolicyEvent.ActuatorInfos = new List<EventActuatorInfo>(actuators.Count);
+ foreach (var actuator in actuators)
+ {
+ remotePolicyEvent.ActuatorInfos.Add(EventActuatorInfo.FromActuator(actuator));
+ }
+
+ remotePolicyEvent.MLAgentsEnvsVersion = s_TrainerPackageVersion;
+ remotePolicyEvent.TrainerCommunicationVersion = s_TrainerCommunicationVersion;
+ return remotePolicyEvent;
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Analytics/TrainingAnalytics.cs.meta b/com.unity.ml-agents/Runtime/Analytics/TrainingAnalytics.cs.meta
new file mode 100644
index 0000000000..9109c265a2
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Analytics/TrainingAnalytics.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 5ad0bc6b45614bb7929d25dd59d5ac38
+timeCreated: 1608168600
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Areas.meta b/com.unity.ml-agents/Runtime/Areas.meta
new file mode 100644
index 0000000000..d00b0cf67c
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Areas.meta
@@ -0,0 +1,8 @@
+fileFormatVersion: 2
+guid: 4774a04ed09a1405cb957aace235adcb
+folderAsset: yes
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Areas/TrainingAreaReplicator.cs b/com.unity.ml-agents/Runtime/Areas/TrainingAreaReplicator.cs
new file mode 100644
index 0000000000..ef4a9d0633
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Areas/TrainingAreaReplicator.cs
@@ -0,0 +1,114 @@
+using System;
+using Unity.Mathematics;
+using UnityEngine;
+
+namespace Unity.MLAgents.Areas
+{
+ /// <summary>
+ /// The Training Area Replicator allows a training area object group to be replicated dynamically at runtime.
+ /// </summary>
+ [DefaultExecutionOrder(-5)]
+ public class TrainingAreaReplicator : MonoBehaviour
+ {
+ /// <summary>
+ /// The base training area to be replicated.
+ /// </summary>
+ public GameObject baseArea;
+
+ /// <summary>
+ /// The number of training areas to replicate.
+ /// </summary>
+ public int numAreas = 1;
+
+ /// <summary>
+ /// The separation between each training area.
+ /// </summary>
+ public float separation = 10f;
+
+ int3 m_GridSize = new int3(1, 1, 1);
+ int m_areaCount = 0;
+ string m_TrainingAreaName;
+
+ /// <summary>
+ /// The size of the computed grid to pack the training areas into.
+ /// </summary>
+ public int3 GridSize => m_GridSize;
+
+ /// <summary>
+ /// The name of the training area.
+ /// </summary>
+ public string TrainingAreaName => m_TrainingAreaName;
+
+ /// <summary>
+ /// Called before the simulation begins to compute the grid size for distributing
+ /// the replicated training areas and to set the area name.
+ /// </summary>
+ public void Awake()
+ {
+ // Computes the Grid Size on Awake
+ ComputeGridSize();
+ // Sets the TrainingArea name to the name of the base area.
+ m_TrainingAreaName = baseArea.name;
+ }
+
+ /// <summary>
+ /// Called after Awake and before the simulation begins; adds the training areas before
+ /// the Academy begins.
+ /// </summary>
+ public void OnEnable()
+ {
+ // Adds the training area replicas during OnEnable to ensure they are added before the Academy begins its work.
+ AddEnvironments();
+ }
+
+ /// <summary>
+ /// Computes the Grid Size for replicating the training area.
+ /// </summary>
+ void ComputeGridSize()
+ {
+ // check if running inference, if so, use the num areas set through the component,
+ // otherwise, pull it from the academy
+ if (Academy.Instance.Communicator != null)
+ numAreas = Academy.Instance.NumAreas;
+
+ var rootNumAreas = Mathf.Pow(numAreas, 1.0f / 3.0f);
+ m_GridSize.x = Mathf.CeilToInt(rootNumAreas);
+ m_GridSize.y = Mathf.CeilToInt(rootNumAreas);
+ var zSize = Mathf.CeilToInt((float)numAreas / (m_GridSize.x * m_GridSize.y));
+ m_GridSize.z = zSize == 0 ? 1 : zSize;
+ }
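+
+ // Worked example (illustrative): with numAreas = 10, cbrt(10) is roughly 2.15,
+ // so m_GridSize.x = m_GridSize.y = ceil(2.15) = 3 and
+ // m_GridSize.z = ceil(10 / (3 * 3)) = 2, i.e. a 3x3x2 grid with room for up to 18 areas.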
+
+ /// <summary>
+ /// Adds replicas of the training area to the scene.
+ /// </summary>
+ /// <exception cref="UnityAgentsException">Thrown when the requested number of areas exceeds the computed grid size.</exception>
+ void AddEnvironments()
+ {
+ if (numAreas > m_GridSize.x * m_GridSize.y * m_GridSize.z)
+ {
+ throw new UnityAgentsException("The number of training areas that you have specified exceeds the size of the grid.");
+ }
+
+ for (int z = 0; z < m_GridSize.z; z++)
+ {
+ for (int y = 0; y < m_GridSize.y; y++)
+ {
+ for (int x = 0; x < m_GridSize.x; x++)
+ {
+ if (m_areaCount == 0)
+ {
+ // Skip this first area since it already exists.
+ m_areaCount = 1;
+ }
+ else if (m_areaCount < numAreas)
+ {
+ m_areaCount++;
+ var area = Instantiate(baseArea, new Vector3(x * separation, y * separation, z * separation), Quaternion.identity);
+ area.name = m_TrainingAreaName;
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Areas/TrainingAreaReplicator.cs.meta b/com.unity.ml-agents/Runtime/Areas/TrainingAreaReplicator.cs.meta
new file mode 100644
index 0000000000..84ac36d789
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Areas/TrainingAreaReplicator.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 7fc26c3bda6fe4937b2264ffe43190b7
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/AssemblyInfo.cs b/com.unity.ml-agents/Runtime/AssemblyInfo.cs
new file mode 100644
index 0000000000..377c8b0870
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/AssemblyInfo.cs
@@ -0,0 +1,14 @@
+using System.Runtime.CompilerServices;
+
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor.Tests")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Tests")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Runtime.Sensor.Tests")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Runtime.Utils.Tests")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Runtime.Tests")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Extensions")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Extensions.Input")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Extensions.Tests")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Pro")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Pro.Tests")]
+
diff --git a/com.unity.ml-agents/Runtime/AssemblyInfo.cs.meta b/com.unity.ml-agents/Runtime/AssemblyInfo.cs.meta
new file mode 100644
index 0000000000..1672ad458e
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/AssemblyInfo.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: b433ecadea36c4af9a3dc65e359a3ca0
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Communicator.meta b/com.unity.ml-agents/Runtime/Communicator.meta
new file mode 100644
index 0000000000..dc3a8bac9b
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Communicator.meta
@@ -0,0 +1,8 @@
+fileFormatVersion: 2
+guid: 432bb08962b3944c6964c0db6af43669
+folderAsset: yes
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Communicator/CommunicatorFactory.cs b/com.unity.ml-agents/Runtime/Communicator/CommunicatorFactory.cs
new file mode 100644
index 0000000000..02d1e4efbd
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Communicator/CommunicatorFactory.cs
@@ -0,0 +1,43 @@
+using System;
+
+namespace Unity.MLAgents
+{
+ /// <summary>
+ /// Factory class for an ICommunicator instance. This is used by the <see cref="Academy"/> at startup.
+ /// By default, on desktop platforms, an ICommunicator will be created and attempt to connect
+ /// to a trainer. This behavior can be prevented by setting <see cref="Enabled"/> to false
+ /// *before* the <see cref="Academy"/> is initialized.
+ /// </summary>
+ public static class CommunicatorFactory
+ {
+ static Func<ICommunicator> s_Creator;
+ static bool s_Enabled = true;
+
+ /// <summary>
+ /// Whether or not an ICommunicator instance will be created when the <see cref="Academy"/> is initialized.
+ /// Changing this has no effect after the <see cref="Academy"/> has already been initialized.
+ /// </summary>
+ public static bool Enabled
+ {
+ get => s_Enabled;
+ set => s_Enabled = value;
+ }
+
+ public static bool CommunicatorRegistered => s_Creator != null;
+
+ internal static ICommunicator Create()
+ {
+ return s_Enabled ? s_Creator() : null;
+ }
+
+ public static void Register<T>(Func<T> creator) where T : ICommunicator
+ {
+ s_Creator = () => creator();
+ }
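+
+ // Minimal usage sketch (assumes an ICommunicator implementation such as the
+ // RpcCommunicator defined elsewhere in this package):
+ //     CommunicatorFactory.Register<RpcCommunicator>(RpcCommunicator.Create);
+ // Registration must happen before the Academy initializes for it to take effect.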
+
+ public static void ClearCreator()
+ {
+ s_Creator = null;
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Communicator/CommunicatorFactory.cs.meta b/com.unity.ml-agents/Runtime/Communicator/CommunicatorFactory.cs.meta
new file mode 100644
index 0000000000..1d208003e3
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Communicator/CommunicatorFactory.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 0b604cddc07e4484a2cdaba630a971ea
+timeCreated: 1613617949
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs b/com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs
new file mode 100644
index 0000000000..e5a97cd167
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs
@@ -0,0 +1,541 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using Google.Protobuf;
+using Unity.MLAgents.CommunicatorObjects;
+using UnityEngine;
+using System.Runtime.CompilerServices;
+using Unity.MLAgents.Actuators;
+using Unity.MLAgents.Sensors;
+using Unity.MLAgents.Demonstrations;
+using Unity.MLAgents.Policies;
+
+using Unity.MLAgents.Analytics;
+
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor.Tests")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Runtime.Utils.Tests")]
+
+namespace Unity.MLAgents
+{
+ internal static class GrpcExtensions
+ {
+ #region AgentInfo
+ /// <summary>
+ /// Static flag to make sure that we only fire the warning once.
+ /// </summary>
+ private static bool s_HaveWarnedTrainerCapabilitiesAgentGroup;
+
+ /// <summary>
+ /// Converts an AgentInfo to a protobuf generated AgentInfoActionPairProto
+ /// </summary>
+ /// <returns>The protobuf version of the AgentInfoActionPairProto.</returns>
+ public static AgentInfoActionPairProto ToInfoActionPairProto(this AgentInfo ai)
+ {
+ var agentInfoProto = ai.ToAgentInfoProto();
+
+ var agentActionProto = new AgentActionProto();
+
+ if (!ai.storedActions.IsEmpty())
+ {
+ if (!ai.storedActions.ContinuousActions.IsEmpty())
+ {
+ agentActionProto.ContinuousActions.AddRange(ai.storedActions.ContinuousActions.Array);
+ }
+ if (!ai.storedActions.DiscreteActions.IsEmpty())
+ {
+ agentActionProto.DiscreteActions.AddRange(ai.storedActions.DiscreteActions.Array);
+ }
+ }
+
+ return new AgentInfoActionPairProto
+ {
+ AgentInfo = agentInfoProto,
+ ActionInfo = agentActionProto
+ };
+ }
+
+ /// <summary>
+ /// Converts an AgentInfo to a protobuf generated AgentInfoProto
+ /// </summary>
+ /// <returns>The protobuf version of the AgentInfo.</returns>
+ public static AgentInfoProto ToAgentInfoProto(this AgentInfo ai)
+ {
+ if (ai.groupId > 0)
+ {
+ var trainerCanHandle = Academy.Instance.TrainerCapabilities == null || Academy.Instance.TrainerCapabilities.MultiAgentGroups;
+ if (!trainerCanHandle)
+ {
+ if (!s_HaveWarnedTrainerCapabilitiesAgentGroup)
+ {
+ Debug.LogWarning(
+ $"Attached trainer doesn't support Multi Agent Groups; group rewards will be ignored." +
+ "Please find the versions that work best together from our release page: " +
+ "https://github.com/Unity-Technologies/ml-agents/releases"
+ );
+ s_HaveWarnedTrainerCapabilitiesAgentGroup = true;
+ }
+ }
+ }
+ var agentInfoProto = new AgentInfoProto
+ {
+ Reward = ai.reward,
+ GroupReward = ai.groupReward,
+ MaxStepReached = ai.maxStepReached,
+ Done = ai.done,
+ Id = ai.episodeId,
+ GroupId = ai.groupId,
+ };
+
+ if (ai.discreteActionMasks != null)
+ {
+ agentInfoProto.ActionMask.AddRange(ai.discreteActionMasks);
+ }
+
+ return agentInfoProto;
+ }
+
+ /// <summary>
+ /// Get summaries for the observations in the AgentInfo part of the AgentInfoActionPairProto.
+ /// </summary>
+ /// <param name="infoActionPair"></param>
+ /// <returns></returns>
+ public static List<ObservationSummary> GetObservationSummaries(this AgentInfoActionPairProto infoActionPair)
+ {
+ List<ObservationSummary> summariesOut = new List<ObservationSummary>();
+ var agentInfo = infoActionPair.AgentInfo;
+ foreach (var obs in agentInfo.Observations)
+ {
+ var summary = new ObservationSummary();
+ summary.shape = obs.Shape.ToArray();
+ summariesOut.Add(summary);
+ }
+
+ return summariesOut;
+ }
+
+ #endregion
+
+ #region BrainParameters
+ /// <summary>
+ /// Converts a BrainParameters into a BrainParametersProto so it can be sent.
+ /// </summary>
+ /// <returns>The BrainInfoProto generated.</returns>
+ /// <param name="bp">The instance of BrainParameter to extend.</param>
+ /// <param name="name">The name of the brain.</param>
+ /// <param name="isTraining">Whether or not the Brain is training.</param>
+ public static BrainParametersProto ToProto(this BrainParameters bp, string name, bool isTraining)
+ {
+ // Disable deprecation warnings so we can set legacy fields
+#pragma warning disable CS0618
+ var brainParametersProto = new BrainParametersProto
+ {
+ VectorActionSpaceTypeDeprecated = (SpaceTypeProto)bp.VectorActionSpaceType,
+ BrainName = name,
+ IsTraining = isTraining,
+ ActionSpec = ToActionSpecProto(bp.ActionSpec),
+ };
+ if (bp.VectorActionSize != null)
+ {
+ brainParametersProto.VectorActionSizeDeprecated.AddRange(bp.VectorActionSize);
+ }
+ if (bp.VectorActionDescriptions != null)
+ {
+ brainParametersProto.VectorActionDescriptionsDeprecated.AddRange(bp.VectorActionDescriptions);
+ }
+#pragma warning restore CS0618
+ return brainParametersProto;
+ }
+
+ /// <summary>
+ /// Converts an ActionSpec into a Protobuf BrainInfoProto so it can be sent.
+ /// </summary>
+ /// <returns>The BrainInfoProto generated.</returns>
+ /// <param name="actionSpec">Description of the actions for the Agent.</param>
+ /// <param name="name">The name of the brain.</param>
+ /// <param name="isTraining">Whether or not the Brain is training.</param>
+ public static BrainParametersProto ToBrainParametersProto(this ActionSpec actionSpec, string name, bool isTraining)
+ {
+ var brainParametersProto = new BrainParametersProto
+ {
+ BrainName = name,
+ IsTraining = isTraining,
+ ActionSpec = ToActionSpecProto(actionSpec),
+ };
+
+ var supportHybrid = Academy.Instance.TrainerCapabilities == null || Academy.Instance.TrainerCapabilities.HybridActions;
+ if (!supportHybrid)
+ {
+ actionSpec.CheckAllContinuousOrDiscrete();
+ if (actionSpec.NumContinuousActions > 0)
+ {
+ brainParametersProto.VectorActionSizeDeprecated.Add(actionSpec.NumContinuousActions);
+ brainParametersProto.VectorActionSpaceTypeDeprecated = SpaceTypeProto.Continuous;
+ }
+ else if (actionSpec.NumDiscreteActions > 0)
+ {
+ brainParametersProto.VectorActionSizeDeprecated.AddRange(actionSpec.BranchSizes);
+ brainParametersProto.VectorActionSpaceTypeDeprecated = SpaceTypeProto.Discrete;
+ }
+ }
+
+ // TODO handle ActionDescriptions?
+ return brainParametersProto;
+ }
+
+ /// <summary>
+ /// Convert a BrainParametersProto to a BrainParameters struct.
+ /// </summary>
+ /// <param name="bpp">An instance of a brain parameters protobuf object.</param>
+ /// <returns>A BrainParameters struct.</returns>
+ public static BrainParameters ToBrainParameters(this BrainParametersProto bpp)
+ {
+ ActionSpec actionSpec;
+ if (bpp.ActionSpec == null)
+ {
+ // Disable deprecation warnings so we can set legacy fields
+#pragma warning disable CS0618
+ var spaceType = (SpaceType)bpp.VectorActionSpaceTypeDeprecated;
+ if (spaceType == SpaceType.Continuous)
+ {
+ actionSpec = ActionSpec.MakeContinuous(bpp.VectorActionSizeDeprecated.ToArray()[0]);
+ }
+ else
+ {
+ actionSpec = ActionSpec.MakeDiscrete(bpp.VectorActionSizeDeprecated.ToArray());
+ }
+#pragma warning restore CS0618
+ }
+ else
+ {
+ actionSpec = ToActionSpec(bpp.ActionSpec);
+ }
+ var bp = new BrainParameters
+ {
+ VectorActionDescriptions = bpp.VectorActionDescriptionsDeprecated.ToArray(),
+ ActionSpec = actionSpec,
+ };
+ return bp;
+ }
+
+ /// <summary>
+ /// Convert an ActionSpecProto to an ActionSpec struct.
+ /// </summary>
+ /// <param name="actionSpecProto">An instance of an action spec protobuf object.</param>
+ /// <returns>An ActionSpec struct.</returns>
+ public static ActionSpec ToActionSpec(this ActionSpecProto actionSpecProto)
+ {
+ var actionSpec = new ActionSpec(actionSpecProto.NumContinuousActions);
+ if (actionSpecProto.DiscreteBranchSizes != null)
+ {
+ actionSpec.BranchSizes = actionSpecProto.DiscreteBranchSizes.ToArray();
+ }
+ return actionSpec;
+ }
+
+ /// <summary>
+ /// Convert an ActionSpec struct to an ActionSpecProto.
+ /// </summary>
+ /// <param name="actionSpec">An instance of an action spec struct.</param>
+ /// <returns>An ActionSpecProto.</returns>
+ public static ActionSpecProto ToActionSpecProto(this ActionSpec actionSpec)
+ {
+ var actionSpecProto = new ActionSpecProto
+ {
+ NumContinuousActions = actionSpec.NumContinuousActions,
+ NumDiscreteActions = actionSpec.NumDiscreteActions,
+ };
+ if (actionSpec.BranchSizes != null)
+ {
+ actionSpecProto.DiscreteBranchSizes.AddRange(actionSpec.BranchSizes);
+ }
+ return actionSpecProto;
+ }
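+
+ // Round-trip sketch (illustrative): an ActionSpec with 2 continuous actions and
+ // discrete branch sizes {3, 2} maps to a proto with NumContinuousActions = 2 and
+ // DiscreteBranchSizes = [3, 2]; ToActionSpec() then reconstructs an equivalent spec.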
+
+ #endregion
+
+ #region DemonstrationMetaData
+ /// <summary>
+ /// Convert metadata object to proto object.
+ /// </summary>
+ public static DemonstrationMetaProto ToProto(this DemonstrationMetaData dm)
+ {
+ var demonstrationName = dm.demonstrationName ?? "";
+ var demoProto = new DemonstrationMetaProto
+ {
+ ApiVersion = DemonstrationMetaData.ApiVersion,
+ MeanReward = dm.meanReward,
+ NumberSteps = dm.numberSteps,
+ NumberEpisodes = dm.numberEpisodes,
+ DemonstrationName = demonstrationName
+ };
+ return demoProto;
+ }
+
+ /// <summary>
+ /// Initialize metadata values based on proto object.
+ /// </summary>
+ public static DemonstrationMetaData ToDemonstrationMetaData(this DemonstrationMetaProto demoProto)
+ {
+ var dm = new DemonstrationMetaData
+ {
+ numberEpisodes = demoProto.NumberEpisodes,
+ numberSteps = demoProto.NumberSteps,
+ meanReward = demoProto.MeanReward,
+ demonstrationName = demoProto.DemonstrationName
+ };
+ if (demoProto.ApiVersion != DemonstrationMetaData.ApiVersion)
+ {
+ throw new Exception("API versions of demonstration are incompatible.");
+ }
+ return dm;
+ }
+
+ #endregion
+
+ public static UnityRLInitParameters ToUnityRLInitParameters(this UnityRLInitializationInputProto inputProto)
+ {
+ return new UnityRLInitParameters
+ {
+ seed = inputProto.Seed,
+ numAreas = inputProto.NumAreas,
+ pythonLibraryVersion = inputProto.PackageVersion,
+ pythonCommunicationVersion = inputProto.CommunicationVersion,
+ TrainerCapabilities = inputProto.Capabilities.ToRLCapabilities()
+ };
+ }
+
+ #region AgentAction
+ public static List<ActionBuffers> ToAgentActionList(this UnityRLInputProto.Types.ListAgentActionProto proto)
+ {
+ var agentActions = new List<ActionBuffers>(proto.Value.Count);
+ foreach (var ap in proto.Value)
+ {
+ agentActions.Add(ap.ToActionBuffers());
+ }
+ return agentActions;
+ }
+
+ public static ActionBuffers ToActionBuffers(this AgentActionProto proto)
+ {
+ return new ActionBuffers(proto.ContinuousActions.ToArray(), proto.DiscreteActions.ToArray());
+ }
+
+ #endregion
+
+ #region Observations
+ /// <summary>
+ /// Static flag to make sure that we only fire the warning once.
+ /// </summary>
+ private static bool s_HaveWarnedTrainerCapabilitiesMultiPng;
+ private static bool s_HaveWarnedTrainerCapabilitiesMapping;
+
+ /// <summary>
+ /// Generate an ObservationProto for the sensor using the provided ObservationWriter.
+ /// This is equivalent to producing an Observation and calling Observation.ToProto(),
+ /// but avoids some intermediate memory allocations.
+ /// </summary>
+ /// <param name="sensor"></param>
+ /// <param name="observationWriter"></param>
+ /// <returns></returns>
+ public static ObservationProto GetObservationProto(this ISensor sensor, ObservationWriter observationWriter)
+ {
+ var obsSpec = sensor.GetObservationSpec();
+ var shape = obsSpec.Shape;
+ ObservationProto observationProto = null;
+ var compressionSpec = sensor.GetCompressionSpec();
+ var compressionType = compressionSpec.SensorCompressionType;
+ // Check capabilities if we need to concatenate PNGs
+ if (compressionType == SensorCompressionType.PNG && shape.Length == 3 && shape[2] > 3)
+ {
+ var trainerCanHandle = Academy.Instance.TrainerCapabilities == null || Academy.Instance.TrainerCapabilities.ConcatenatedPngObservations;
+ if (!trainerCanHandle)
+ {
+ if (!s_HaveWarnedTrainerCapabilitiesMultiPng)
+ {
+ Debug.LogWarning(
+ $"Attached trainer doesn't support multiple PNGs. Switching to uncompressed observations for sensor {sensor.GetName()}. " +
+ "Please find the versions that work best together from our release page: " +
+ "https://github.com/Unity-Technologies/ml-agents/releases"
+ );
+ s_HaveWarnedTrainerCapabilitiesMultiPng = true;
+ }
+ compressionType = SensorCompressionType.None;
+ }
+ }
+ // Check capabilities if we need mapping for compressed observations
+ if (compressionType != SensorCompressionType.None && shape.Length == 3 && shape[2] > 3)
+ {
+ var trainerCanHandleMapping = Academy.Instance.TrainerCapabilities == null || Academy.Instance.TrainerCapabilities.CompressedChannelMapping;
+ var isTrivialMapping = compressionSpec.IsTrivialMapping();
+ if (!trainerCanHandleMapping && !isTrivialMapping)
+ {
+ if (!s_HaveWarnedTrainerCapabilitiesMapping)
+ {
+ Debug.LogWarning(
+ $"The sensor {sensor.GetName()} is using non-trivial mapping and " +
+ "the attached trainer doesn't support compression mapping. " +
+ "Switching to uncompressed observations. " +
+ "Please find the versions that work best together from our release page: " +
+ "https://github.com/Unity-Technologies/ml-agents/releases"
+ );
+ s_HaveWarnedTrainerCapabilitiesMapping = true;
+ }
+ compressionType = SensorCompressionType.None;
+ }
+ }
+
+ if (compressionType == SensorCompressionType.None)
+ {
+ var numFloats = sensor.ObservationSize();
+ var floatDataProto = new ObservationProto.Types.FloatData();
+ // Resize the float array
+ // TODO upgrade protobuf versions so that we can set the Capacity directly - see https://github.com/protocolbuffers/protobuf/pull/6530
+ for (var i = 0; i < numFloats; i++)
+ {
+ floatDataProto.Data.Add(0.0f);
+ }
+
+ observationWriter.SetTarget(floatDataProto.Data, sensor.GetObservationSpec(), 0);
+ sensor.Write(observationWriter);
+
+ observationProto = new ObservationProto
+ {
+ FloatData = floatDataProto,
+ CompressionType = (CompressionTypeProto)SensorCompressionType.None,
+ };
+ }
+ else
+ {
+ var compressedObs = sensor.GetCompressedObservation();
+ if (compressedObs == null)
+ {
+ throw new UnityAgentsException(
+ $"GetCompressedObservation() returned null data for sensor named {sensor.GetName()}. " +
+ "You must return a byte[]. If you don't want to use compressed observations, " +
+ "return CompressionSpec.Default() from GetCompressionSpec()."
+ );
+ }
+ observationProto = new ObservationProto
+ {
+ CompressedData = ByteString.CopyFrom(compressedObs),
+ CompressionType = (CompressionTypeProto)sensor.GetCompressionSpec().SensorCompressionType,
+ };
+ if (compressionSpec.CompressedChannelMapping != null)
+ {
+ observationProto.CompressedChannelMapping.AddRange(compressionSpec.CompressedChannelMapping);
+ }
+ }
+
+ // Add the dimension properties to the observationProto
+ var dimensionProperties = obsSpec.DimensionProperties;
+ for (int i = 0; i < dimensionProperties.Length; i++)
+ {
+ observationProto.DimensionProperties.Add((int)dimensionProperties[i]);
+ }
+
+ // Checking trainer compatibility with variable length observations
+ if (dimensionProperties == new InplaceArray<DimensionProperty>(DimensionProperty.VariableSize, DimensionProperty.None))
+ {
+ var trainerCanHandleVarLenObs = Academy.Instance.TrainerCapabilities == null || Academy.Instance.TrainerCapabilities.VariableLengthObservation;
+ if (!trainerCanHandleVarLenObs)
+ {
+ throw new UnityAgentsException("Variable Length Observations are not supported by the trainer");
+ }
+ }
+
+ for (var i = 0; i < shape.Length; i++)
+ {
+ observationProto.Shape.Add(shape[i]);
+ }
+
+ var sensorName = sensor.GetName();
+ if (!string.IsNullOrEmpty(sensorName))
+ {
+ observationProto.Name = sensorName;
+ }
+
+ observationProto.ObservationType = (ObservationTypeProto)obsSpec.ObservationType;
+ return observationProto;
+ }
+
+ #endregion
+
+ public static UnityRLCapabilities ToRLCapabilities(this UnityRLCapabilitiesProto proto)
+ {
+ return new UnityRLCapabilities
+ {
+ BaseRLCapabilities = proto.BaseRLCapabilities,
+ ConcatenatedPngObservations = proto.ConcatenatedPngObservations,
+ CompressedChannelMapping = proto.CompressedChannelMapping,
+ HybridActions = proto.HybridActions,
+ TrainingAnalytics = proto.TrainingAnalytics,
+ VariableLengthObservation = proto.VariableLengthObservation,
+ MultiAgentGroups = proto.MultiAgentGroups,
+ };
+ }
+
+ public static UnityRLCapabilitiesProto ToProto(this UnityRLCapabilities rlCaps)
+ {
+ return new UnityRLCapabilitiesProto
+ {
+ BaseRLCapabilities = rlCaps.BaseRLCapabilities,
+ ConcatenatedPngObservations = rlCaps.ConcatenatedPngObservations,
+ CompressedChannelMapping = rlCaps.CompressedChannelMapping,
+ HybridActions = rlCaps.HybridActions,
+ TrainingAnalytics = rlCaps.TrainingAnalytics,
+ VariableLengthObservation = rlCaps.VariableLengthObservation,
+ MultiAgentGroups = rlCaps.MultiAgentGroups,
+ };
+ }
+
+ #region Analytics
+ internal static TrainingEnvironmentInitializedEvent ToTrainingEnvironmentInitializedEvent(
+ this TrainingEnvironmentInitialized inputProto)
+ {
+ return new TrainingEnvironmentInitializedEvent
+ {
+ TrainerPythonVersion = inputProto.PythonVersion,
+ MLAgentsVersion = inputProto.MlagentsVersion,
+ MLAgentsEnvsVersion = inputProto.MlagentsEnvsVersion,
+ TorchVersion = inputProto.TorchVersion,
+ TorchDeviceType = inputProto.TorchDeviceType,
+ NumEnvironments = inputProto.NumEnvs,
+ NumEnvironmentParameters = inputProto.NumEnvironmentParameters,
+ RunOptions = inputProto.RunOptions,
+ };
+ }
+
+ internal static TrainingBehaviorInitializedEvent ToTrainingBehaviorInitializedEvent(
+ this TrainingBehaviorInitialized inputProto)
+ {
+ RewardSignals rewardSignals = 0;
+ rewardSignals |= inputProto.ExtrinsicRewardEnabled ? RewardSignals.Extrinsic : 0;
+ rewardSignals |= inputProto.GailRewardEnabled ? RewardSignals.Gail : 0;
+ rewardSignals |= inputProto.CuriosityRewardEnabled ? RewardSignals.Curiosity : 0;
+ rewardSignals |= inputProto.RndRewardEnabled ? RewardSignals.Rnd : 0;
+
+ TrainingFeatures trainingFeatures = 0;
+ trainingFeatures |= inputProto.BehavioralCloningEnabled ? TrainingFeatures.BehavioralCloning : 0;
+ trainingFeatures |= inputProto.RecurrentEnabled ? TrainingFeatures.Recurrent : 0;
+ trainingFeatures |= inputProto.TrainerThreaded ? TrainingFeatures.Threaded : 0;
+ trainingFeatures |= inputProto.SelfPlayEnabled ? TrainingFeatures.SelfPlay : 0;
+ trainingFeatures |= inputProto.CurriculumEnabled ? TrainingFeatures.Curriculum : 0;
+
+
+ return new TrainingBehaviorInitializedEvent
+ {
+ BehaviorName = inputProto.BehaviorName,
+ TrainerType = inputProto.TrainerType,
+ RewardSignalFlags = rewardSignals,
+ TrainingFeatureFlags = trainingFeatures,
+ VisualEncoder = inputProto.VisualEncoder,
+ NumNetworkLayers = inputProto.NumNetworkLayers,
+ NumNetworkHiddenUnits = inputProto.NumNetworkHiddenUnits,
+ Config = inputProto.Config,
+ };
+ }
+
+ #endregion
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs.meta b/com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs.meta
new file mode 100644
index 0000000000..31c109f8fa
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 02e8742d8a124607bef3b5ff8b9dd3d0
+timeCreated: 1569444771
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs b/com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs
new file mode 100644
index 0000000000..2036a2aa28
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs
@@ -0,0 +1,173 @@
+using System;
+using System.Collections.Generic;
+using Unity.MLAgents.Actuators;
+using Unity.MLAgents.Sensors;
+
+namespace Unity.MLAgents
+{
+ public struct CommunicatorInitParameters
+ {
+ /// <summary>
+ /// Port to listen for connections on.
+ /// </summary>
+ public int port;
+
+ /// <summary>
+ /// The name of the environment.
+ /// </summary>
+ public string name;
+
+ /// <summary>
+ /// The version of the Unity SDK.
+ /// </summary>
+ public string unityPackageVersion;
+
+ /// <summary>
+ /// The version of the communication API.
+ /// </summary>
+ public string unityCommunicationVersion;
+
+ /// <summary>
+ /// The RL capabilities of the C# codebase.
+ /// </summary>
+ public UnityRLCapabilities CSharpCapabilities;
+ }
+ public struct UnityRLInitParameters
+ {
+ /// <summary>
+ /// A random number generator (RNG) seed sent from the python process to Unity.
+ /// </summary>
+ public int seed;
+
+ /// <summary>
+ /// The number of areas to replicate if Training Area Replication is used in the scene.
+ /// </summary>
+ public int numAreas;
+
+ /// <summary>
+ /// The library version of the python process.
+ /// </summary>
+ public string pythonLibraryVersion;
+
+ /// <summary>
+ /// The version of the communication API that python is using.
+ /// </summary>
+ public string pythonCommunicationVersion;
+
+ /// <summary>
+ /// The RL capabilities of the Trainer codebase.
+ /// </summary>
+ public UnityRLCapabilities TrainerCapabilities;
+ }
+ internal struct UnityRLInputParameters
+ {
+ /// <summary>
+ /// Boolean sent back from python to indicate whether or not training is happening.
+ /// </summary>
+ public bool isTraining;
+ }
+
+ /// <summary>
+ /// Delegate for handling quit events sent back from the communicator.
+ /// </summary>
+ public delegate void QuitCommandHandler();
+
+ /// <summary>
+ /// Delegate for handling reset parameter updates sent from the communicator.
+ /// </summary>
+ public delegate void ResetCommandHandler();
+
+ /// <summary>
+ /// Delegate to handle UnityRLInputParameters updates from the communicator.
+ /// </summary>
+ /// <param name="inputParams"></param>
+ internal delegate void RLInputReceivedHandler(UnityRLInputParameters inputParams);
+
+ /**
+ This is the interface of the Communicators.
+ This does not need to be modified nor implemented to create a Unity environment.
+
+ When the Unity Communicator is initialized, it will wait for the External Communicator
+ to be initialized as well. The two communicators will then exchange their first messages
+ that will usually contain information for initialization (information that does not need
+ to be resent at each new exchange).
+
+ By convention a Unity input is from External to Unity and a Unity output is from Unity to
+ External. Inputs and outputs are relative to Unity.
+
+ By convention, when the Unity Communicator and External Communicator call exchange, the
+ exchange is NOT simultaneous but sequential. This means that when a side of the
+ communication calls exchange, the other will receive the result of its previous
+ exchange call.
+ This is what happens when A calls exchange a single time:
+ A sends data_1 to B -> B receives data_1 -> B generates and sends data_2 -> A receives data_2
+ When A calls exchange, it sends data_1 and receives data_2
+
+ Since the messages are sent back and forth with exchange and simultaneously when calling
+ initialize, External sends two messages at initialization.
+
+ The structure of the messages is as follows:
+ UnityMessage
+ ...Header
+ ...UnityOutput
+ ......UnityRLOutput
+ ......UnityRLInitializationOutput
+ ...UnityInput
+ ......UnityRLInput
+ ......UnityRLInitializationInput
+
+ UnityOutput and UnityInput can be extended to provide functionalities beyond RL
+ UnityRLOutput and UnityRLInput can be extended to provide new RL functionalities
+ */
+ public interface ICommunicator : IDisposable
+ {
+ /// <summary>
+ /// Quit was received by the communicator.
+ /// </summary>
+ event QuitCommandHandler QuitCommandReceived;
+
+ /// <summary>
+ /// Reset command sent back from the communicator.
+ /// </summary>
+ event ResetCommandHandler ResetCommandReceived;
+
+ /// <summary>
+ /// Sends the academy parameters through the Communicator.
+ /// Is used by the academy to send the AcademyParameters to the communicator.
+ /// </summary>
+ /// <returns>Whether the connection was successful.</returns>
+ /// <param name="initParameters">The Unity Initialization Parameters to be sent.</param>
+ /// <param name="initParametersOut">The External Initialization Parameters received.</param>
+ bool Initialize(CommunicatorInitParameters initParameters, out UnityRLInitParameters initParametersOut);
+
+ /// <summary>
+ /// Registers a new Brain to the Communicator.
+ /// </summary>
+ /// <param name="name">The name or key uniquely identifying the Brain.</param>
+ /// <param name="actionSpec">Description of the actions for the Agent.</param>
+ void SubscribeBrain(string name, ActionSpec actionSpec);
+
+ /// <summary>
+ /// Sends the observations of one Agent.
+ /// </summary>
+ /// <param name="brainKey">Batch Key.</param>
+ /// <param name="info">Agent info.</param>
+ /// <param name="sensors">The list of ISensors of the Agent.</param>
+ void PutObservations(string brainKey, AgentInfo info, List<ISensor> sensors);
+
+ /// <summary>
+ /// Signals the ICommunicator that the Agents are now ready to receive their action
+ /// and that if the communicator has not yet received an action for one of the Agents
+ /// it needs to get one at this point.
+ /// </summary>
+ void DecideBatch();
+
+ /// <summary>
+ /// Gets the AgentActions based on the batching key.
+ /// </summary>
+ /// <param name="key">A key to identify which behavior actions to get.</param>
+ /// <param name="agentId">A key to identify which Agent actions to get.</param>
+ /// <returns></returns>
+ ActionBuffers GetActions(string key, int agentId);
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs.meta b/com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs.meta
new file mode 100644
index 0000000000..15f8a01eb3
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 53977f05e5684d4a9e2ef86f225934a2
+timeCreated: 1568395551
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs b/com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs
new file mode 100644
index 0000000000..24d8ae563e
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs
@@ -0,0 +1,608 @@
+#if UNITY_EDITOR || UNITY_STANDALONE
+#define MLA_SUPPORTED_TRAINING_PLATFORM
+#endif
+
+#if MLA_SUPPORTED_TRAINING_PLATFORM
+using Grpc.Core;
+#if UNITY_EDITOR
+using UnityEditor;
+#endif
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using UnityEngine;
+using Unity.MLAgents.Actuators;
+using Unity.MLAgents.CommunicatorObjects;
+using Unity.MLAgents.Sensors;
+using Unity.MLAgents.SideChannels;
+using Google.Protobuf;
+
+using Unity.MLAgents.Analytics;
+
+namespace Unity.MLAgents
+{
+ /// Responsible for communication with External using gRPC.
+ public class RpcCommunicator : ICommunicator
+ {
+ public event QuitCommandHandler QuitCommandReceived;
+ public event ResetCommandHandler ResetCommandReceived;
+
+ /// If true, the communication is active.
+ bool m_IsOpen;
+
+ List<string> m_BehaviorNames = new List<string>();
+ bool m_NeedCommunicateThisStep;
+ ObservationWriter m_ObservationWriter = new ObservationWriter();
+ Dictionary<string, SensorShapeValidator> m_SensorShapeValidators = new Dictionary<string, SensorShapeValidator>();
+ Dictionary<string, List<int>> m_OrderedAgentsRequestingDecisions = new Dictionary<string, List<int>>();
+
+ /// The current UnityRLOutput to be sent when all the brains queried the communicator
+ UnityRLOutputProto m_CurrentUnityRlOutput =
+ new UnityRLOutputProto();
+
+ Dictionary<string, Dictionary<int, ActionBuffers>> m_LastActionsReceived =
+ new Dictionary<string, Dictionary<int, ActionBuffers>>();
+
+ // Brains that we have sent over the communicator with agents.
+ HashSet<string> m_SentBrainKeys = new HashSet<string>();
+ Dictionary<string, ActionSpec> m_UnsentBrainKeys = new Dictionary<string, ActionSpec>();
+
+
+ /// The Unity to External client.
+ UnityToExternalProto.UnityToExternalProtoClient m_Client;
+ Channel m_Channel;
+
+ /// <summary>
+ /// Initializes a new instance of the RPCCommunicator class.
+ /// </summary>
+ protected RpcCommunicator()
+ {
+ }
+
+ public static RpcCommunicator Create()
+ {
+#if MLA_SUPPORTED_TRAINING_PLATFORM
+ return new RpcCommunicator();
+#else
+ return null;
+#endif
+ }
+
+#region Initialization
+
+ internal static bool CheckCommunicationVersionsAreCompatible(
+ string unityCommunicationVersion,
+ string pythonApiVersion
+ )
+ {
+ var unityVersion = new Version(unityCommunicationVersion);
+ var pythonVersion = new Version(pythonApiVersion);
+ if (unityVersion.Major == 0)
+ {
+ if (unityVersion.Major != pythonVersion.Major || unityVersion.Minor != pythonVersion.Minor)
+ {
+ return false;
+ }
+ }
+ else if (unityVersion.Major != pythonVersion.Major)
+ {
+ return false;
+ }
+ else if (unityVersion.Minor != pythonVersion.Minor)
+ {
+ // If a feature is used in Unity but not supported in the trainer,
+ // we will warn at the point it's used. Don't warn here to avoid noise.
+ }
+ return true;
+ }
+
+ /// <summary>
+ /// Sends the initialization parameters through the Communicator.
+ /// Is used by the academy to send initialization parameters to the communicator.
+ /// </summary>
+ /// <returns>Whether the connection was successful.</returns>
+ /// <param name="initParameters">The Unity Initialization Parameters to be sent.</param>
+ /// <param name="initParametersOut">The External Initialization Parameters received.</param>
+ public bool Initialize(CommunicatorInitParameters initParameters, out UnityRLInitParameters initParametersOut)
+ {
+#if MLA_SUPPORTED_TRAINING_PLATFORM
+ var academyParameters = new UnityRLInitializationOutputProto
+ {
+ Name = initParameters.name,
+ PackageVersion = initParameters.unityPackageVersion,
+ CommunicationVersion = initParameters.unityCommunicationVersion,
+ Capabilities = initParameters.CSharpCapabilities.ToProto()
+ };
+
+ UnityInputProto input;
+ UnityInputProto initializationInput;
+ try
+ {
+ initializationInput = Initialize(
+ initParameters.port,
+ new UnityOutputProto
+ {
+ RlInitializationOutput = academyParameters
+ },
+ out input
+ );
+ }
+ catch (Exception ex)
+ {
+ if (ex is RpcException rpcException)
+ {
+
+ switch (rpcException.Status.StatusCode)
+ {
+ case StatusCode.Unavailable:
+ // This is the common case where there's no trainer to connect to.
+ break;
+ case StatusCode.DeadlineExceeded:
+ // We don't currently set a deadline for connection, but likely will in the future.
+ break;
+ default:
+ Debug.Log($"Unexpected gRPC exception when trying to initialize communication: {rpcException}");
+ break;
+ }
+ }
+ else
+ {
+ Debug.Log($"Unexpected exception when trying to initialize communication: {ex}");
+ }
+ initParametersOut = new UnityRLInitParameters();
+ NotifyQuitAndShutDownChannel();
+ return false;
+ }
+
+ var pythonPackageVersion = initializationInput.RlInitializationInput.PackageVersion;
+ var pythonCommunicationVersion = initializationInput.RlInitializationInput.CommunicationVersion;
+ TrainingAnalytics.SetTrainerInformation(pythonPackageVersion, pythonCommunicationVersion);
+
+ var communicationIsCompatible = CheckCommunicationVersionsAreCompatible(
+ initParameters.unityCommunicationVersion,
+ pythonCommunicationVersion
+ );
+
+ // Initialization succeeded part-way. The most likely cause is a mismatch between the communicator
+ // API strings, so log an explicit warning if that's the case.
+ if (initializationInput != null && input == null)
+ {
+ if (!communicationIsCompatible)
+ {
+ Debug.LogWarningFormat(
+ "Communication protocol between python ({0}) and Unity ({1}) have different " +
+ "versions which make them incompatible. Python library version: {2}.",
+ pythonCommunicationVersion, initParameters.unityCommunicationVersion,
+ pythonPackageVersion
+ );
+ }
+ else
+ {
+ Debug.LogWarningFormat(
+ "Unknown communication error between Python. Python communication protocol: {0}, " +
+ "Python library version: {1}.",
+ pythonCommunicationVersion,
+ pythonPackageVersion
+ );
+ }
+
+ initParametersOut = new UnityRLInitParameters();
+ return false;
+ }
+
+ UpdateEnvironmentWithInput(input.RlInput);
+ initParametersOut = initializationInput.RlInitializationInput.ToUnityRLInitParameters();
+ // Be sure to shut down the grpc channel when the application is quitting.
+ Application.quitting += NotifyQuitAndShutDownChannel;
+ return true;
+#else
+ initParametersOut = new UnityRLInitParameters();
+ return false;
+#endif
+ }
+
+ /// <summary>
+ /// Adds the brain to the list of brains which will be sending information to External.
+ /// </summary>
+ /// <param name="brainKey">Brain key.</param>
+ /// <param name="actionSpec">Description of the actions for the Agent.</param>
+ public void SubscribeBrain(string brainKey, ActionSpec actionSpec)
+ {
+ if (m_BehaviorNames.Contains(brainKey))
+ {
+ return;
+ }
+ m_BehaviorNames.Add(brainKey);
+ m_CurrentUnityRlOutput.AgentInfos.Add(
+ brainKey,
+ new UnityRLOutputProto.Types.ListAgentInfoProto()
+ );
+
+ CacheActionSpec(brainKey, actionSpec);
+ }
+
+ void UpdateEnvironmentWithInput(UnityRLInputProto rlInput)
+ {
+ SideChannelManager.ProcessSideChannelData(rlInput.SideChannel.ToArray());
+ SendCommandEvent(rlInput.Command);
+ }
+
+ UnityInputProto Initialize(int port, UnityOutputProto unityOutput, out UnityInputProto unityInput)
+ {
+ m_IsOpen = true;
+ m_Channel = new Channel($"localhost:{port}", ChannelCredentials.Insecure);
+
+ m_Client = new UnityToExternalProto.UnityToExternalProtoClient(m_Channel);
+ var result = m_Client.Exchange(WrapMessage(unityOutput, 200));
+ var inputMessage = m_Client.Exchange(WrapMessage(null, 200));
+ unityInput = inputMessage.UnityInput;
+#if UNITY_EDITOR
+ EditorApplication.playModeStateChanged += HandleOnPlayModeChanged;
+#endif
+ if (result.Header.Status != 200 || inputMessage.Header.Status != 200)
+ {
+ m_IsOpen = false;
+ NotifyQuitAndShutDownChannel();
+ }
+ return result.UnityInput;
+ }
+
+ void NotifyQuitAndShutDownChannel()
+ {
+ QuitCommandReceived?.Invoke();
+ try
+ {
+ m_Channel.ShutdownAsync().Wait();
+ }
+ catch (Exception)
+ {
+ // do nothing
+ }
+ }
+
+#endregion
+
+#region Destruction
+
+ /// <summary>
+ /// Close the communicator gracefully on both sides of the communication.
+ /// </summary>
+ public void Dispose()
+ {
+ if (!m_IsOpen)
+ {
+ return;
+ }
+
+ try
+ {
+ m_Client.Exchange(WrapMessage(null, 400));
+ m_IsOpen = false;
+ }
+ catch
+ {
+ // ignored
+ }
+ }
+
+#endregion
+
+#region Sending Events
+
+ void SendCommandEvent(CommandProto command)
+ {
+ switch (command)
+ {
+ case CommandProto.Quit:
+ {
+ NotifyQuitAndShutDownChannel();
+ return;
+ }
+ case CommandProto.Reset:
+ {
+ foreach (var brainName in m_OrderedAgentsRequestingDecisions.Keys)
+ {
+ m_OrderedAgentsRequestingDecisions[brainName].Clear();
+ }
+ ResetCommandReceived?.Invoke();
+ return;
+ }
+ default:
+ {
+ return;
+ }
+ }
+ }
+
+#endregion
+
+#region Sending and retrieving data
+
+ public void DecideBatch()
+ {
+ if (!m_NeedCommunicateThisStep)
+ {
+ return;
+ }
+ m_NeedCommunicateThisStep = false;
+
+ SendBatchedMessageHelper();
+ }
+
+ /// <summary>
+ /// Sends the observations of one Agent.
+ /// </summary>
+ /// <param name="behaviorName">Batch Key.</param>
+ /// <param name="info">Agent info.</param>
+ /// <param name="sensors">Sensors that will produce the observations</param>
+ public void PutObservations(string behaviorName, AgentInfo info, List<ISensor> sensors)
+ {
+#if DEBUG
+ if (!m_SensorShapeValidators.ContainsKey(behaviorName))
+ {
+ m_SensorShapeValidators[behaviorName] = new SensorShapeValidator();
+ }
+ m_SensorShapeValidators[behaviorName].ValidateSensors(sensors);
+#endif
+
+ using (TimerStack.Instance.Scoped("AgentInfo.ToProto"))
+ {
+ var agentInfoProto = info.ToAgentInfoProto();
+
+ using (TimerStack.Instance.Scoped("GenerateSensorData"))
+ {
+ foreach (var sensor in sensors)
+ {
+ var obsProto = sensor.GetObservationProto(m_ObservationWriter);
+ agentInfoProto.Observations.Add(obsProto);
+ }
+ }
+ m_CurrentUnityRlOutput.AgentInfos[behaviorName].Value.Add(agentInfoProto);
+ }
+
+ m_NeedCommunicateThisStep = true;
+ if (!m_OrderedAgentsRequestingDecisions.ContainsKey(behaviorName))
+ {
+ m_OrderedAgentsRequestingDecisions[behaviorName] = new List<int>();
+ }
+ if (!info.done)
+ {
+ m_OrderedAgentsRequestingDecisions[behaviorName].Add(info.episodeId);
+ }
+ if (!m_LastActionsReceived.ContainsKey(behaviorName))
+ {
+ m_LastActionsReceived[behaviorName] = new Dictionary<int, ActionBuffers>();
+ }
+ m_LastActionsReceived[behaviorName][info.episodeId] = ActionBuffers.Empty;
+ if (info.done)
+ {
+ m_LastActionsReceived[behaviorName].Remove(info.episodeId);
+ }
+ }
+
+ /// <summary>
+ /// Helper method that sends the current UnityRLOutput, receives the next UnityInput and
+ /// applies the appropriate AgentAction to the agents.
+ /// </summary>
+ void SendBatchedMessageHelper()
+ {
+ var message = new UnityOutputProto
+ {
+ RlOutput = m_CurrentUnityRlOutput,
+ };
+ var tempUnityRlInitializationOutput = GetTempUnityRlInitializationOutput();
+ if (tempUnityRlInitializationOutput != null)
+ {
+ message.RlInitializationOutput = tempUnityRlInitializationOutput;
+ }
+
+ byte[] messageAggregated = SideChannelManager.GetSideChannelMessage();
+ message.RlOutput.SideChannel = ByteString.CopyFrom(messageAggregated);
+
+ var input = Exchange(message);
+ UpdateSentActionSpec(tempUnityRlInitializationOutput);
+
+ foreach (var k in m_CurrentUnityRlOutput.AgentInfos.Keys)
+ {
+ m_CurrentUnityRlOutput.AgentInfos[k].Value.Clear();
+ }
+
+ var rlInput = input?.RlInput;
+
+ if (rlInput?.AgentActions == null)
+ {
+ return;
+ }
+
+ UpdateEnvironmentWithInput(rlInput);
+
+ foreach (var brainName in rlInput.AgentActions.Keys)
+ {
+ if (!m_OrderedAgentsRequestingDecisions[brainName].Any())
+ {
+ continue;
+ }
+
+ if (!rlInput.AgentActions[brainName].Value.Any())
+ {
+ continue;
+ }
+
+ var agentActions = rlInput.AgentActions[brainName].ToAgentActionList();
+ var numAgents = m_OrderedAgentsRequestingDecisions[brainName].Count;
+ for (var i = 0; i < numAgents; i++)
+ {
+ var agentAction = agentActions[i];
+ var agentId = m_OrderedAgentsRequestingDecisions[brainName][i];
+ if (m_LastActionsReceived[brainName].ContainsKey(agentId))
+ {
+ m_LastActionsReceived[brainName][agentId] = agentAction;
+ }
+ }
+ }
+ foreach (var brainName in m_OrderedAgentsRequestingDecisions.Keys)
+ {
+ m_OrderedAgentsRequestingDecisions[brainName].Clear();
+ }
+ }
+
+ public ActionBuffers GetActions(string behaviorName, int agentId)
+ {
+ if (m_LastActionsReceived.ContainsKey(behaviorName))
+ {
+ if (m_LastActionsReceived[behaviorName].ContainsKey(agentId))
+ {
+ return m_LastActionsReceived[behaviorName][agentId];
+ }
+ }
+ return ActionBuffers.Empty;
+ }
+
+ /// <summary>
+ /// Sends a UnityOutput and receives a UnityInput.
+ /// </summary>
+ /// <returns>The next UnityInput.</returns>
+ /// <param name="unityOutput">The UnityOutput to be sent.</param>
+ UnityInputProto Exchange(UnityOutputProto unityOutput)
+ {
+ if (!m_IsOpen)
+ {
+ return null;
+ }
+
+ try
+ {
+ var message = m_Client.Exchange(WrapMessage(unityOutput, 200));
+ if (message.Header.Status == 200)
+ {
+ return message.UnityInput;
+ }
+
+ m_IsOpen = false;
+ // Not sure if the quit command is actually sent when a
+ // non 200 message is received. Notify that we are indeed
+ // quitting.
+ NotifyQuitAndShutDownChannel();
+ return message.UnityInput;
+ }
+ catch (Exception ex)
+ {
+ if (ex is RpcException rpcException)
+ {
+ // Log more verbose errors if they're something the user can possibly do something about.
+ switch (rpcException.Status.StatusCode)
+ {
+ case StatusCode.Unavailable:
+ // This can happen when python disconnects. Ignore it to avoid noisy logs.
+ break;
+ case StatusCode.ResourceExhausted:
+ // This happens if the message body is too large. There's no way to
+ // gracefully handle this, but at least we can show the message and the
+ // user can try to reduce the number of agents or observation sizes.
+ Debug.LogError($"GRPC Exception: {rpcException.Message}. Disconnecting from trainer.");
+ break;
+ default:
+ // Other unknown errors. Log at INFO level.
+ Debug.Log($"GRPC Exception: {rpcException.Message}. Disconnecting from trainer.");
+ break;
+ }
+ }
+ else
+ {
+ // Fall-through for other error types
+ Debug.LogError($"Communication Exception: {ex.Message}. Disconnecting from trainer.");
+ }
+
+ m_IsOpen = false;
+ NotifyQuitAndShutDownChannel();
+ return null;
+ }
+ }
+
+ /// <summary>
+ /// Wraps the UnityOutput into a message with the appropriate status.
+ /// </summary>
+ /// <returns>The corresponding UnityMessage.</returns>
+ /// <param name="content">The UnityOutput to be wrapped.</param>
+ /// <param name="status">The status of the message.</param>
+ static UnityMessageProto WrapMessage(UnityOutputProto content, int status)
+ {
+ return new UnityMessageProto
+ {
+ Header = new HeaderProto { Status = status },
+ UnityOutput = content
+ };
+ }
+
+ void CacheActionSpec(string behaviorName, ActionSpec actionSpec)
+ {
+ if (m_SentBrainKeys.Contains(behaviorName))
+ {
+ return;
+ }
+
+ // TODO We should check that if m_unsentBrainKeys has brainKey, it equals actionSpec
+ m_UnsentBrainKeys[behaviorName] = actionSpec;
+ }
+
+ UnityRLInitializationOutputProto GetTempUnityRlInitializationOutput()
+ {
+ UnityRLInitializationOutputProto output = null;
+ foreach (var behaviorName in m_UnsentBrainKeys.Keys)
+ {
+ if (m_CurrentUnityRlOutput.AgentInfos.ContainsKey(behaviorName))
+ {
+ if (m_CurrentUnityRlOutput.AgentInfos[behaviorName].CalculateSize() > 0)
+ {
+ // Only send the actionSpec if there is a non empty list of
+ // AgentInfos ready to be sent.
+ // This is to ensure that The Python side will always have a first
+ // observation when receiving the ActionSpec
+ if (output == null)
+ {
+ output = new UnityRLInitializationOutputProto();
+ }
+
+ var actionSpec = m_UnsentBrainKeys[behaviorName];
+ output.BrainParameters.Add(actionSpec.ToBrainParametersProto(behaviorName, true));
+ }
+ }
+ }
+
+ return output;
+ }
+
+ void UpdateSentActionSpec(UnityRLInitializationOutputProto output)
+ {
+ if (output == null)
+ {
+ return;
+ }
+
+ foreach (var brainProto in output.BrainParameters)
+ {
+ m_SentBrainKeys.Add(brainProto.BrainName);
+ m_UnsentBrainKeys.Remove(brainProto.BrainName);
+ }
+ }
+
+#endregion
+
+#if UNITY_EDITOR
+ /// <summary>
+ /// When the editor exits, the communicator must be closed.
+ /// </summary>
+ /// <param name="state">State.</param>
+ void HandleOnPlayModeChanged(PlayModeStateChange state)
+ {
+ // This method is run whenever the playmode state is changed.
+ if (state == PlayModeStateChange.ExitingPlayMode)
+ {
+ Dispose();
+ }
+ }
+
+#endif
+ }
+}
+#endif // UNITY_EDITOR || UNITY_STANDALONE
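`CheckCommunicationVersionsAreCompatible` encodes a semver-style rule: while the communication API is pre-1.0, major and minor must match exactly; from 1.x on, only the major version must match, and a minor mismatch is tolerated (the warning is deferred to the point where an unsupported feature is used). A few illustrative evaluations; the method is `internal`, so this sketch assumes in-assembly or test access, and the version strings are made up:

```csharp
// true:  0.x versions with matching major and minor
RpcCommunicator.CheckCommunicationVersionsAreCompatible("0.15.0", "0.15.1");
// false: 0.x versions with different minors
RpcCommunicator.CheckCommunicationVersionsAreCompatible("0.15.0", "0.16.0");
// true:  post-1.0, only the major version must match
RpcCommunicator.CheckCommunicationVersionsAreCompatible("1.2.0", "1.5.0");
// false: different majors are always incompatible
RpcCommunicator.CheckCommunicationVersionsAreCompatible("1.2.0", "2.0.0");
```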
diff --git a/com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs.meta b/com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs.meta
new file mode 100644
index 0000000000..d1903d74cc
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs.meta
@@ -0,0 +1,13 @@
+fileFormatVersion: 2
+guid: 57a3dc12d3b88408688bb490b65a838e
+timeCreated: 1523046536
+licenseType: Free
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Communicator/UnityRLCapabilities.cs b/com.unity.ml-agents/Runtime/Communicator/UnityRLCapabilities.cs
new file mode 100644
index 0000000000..3dffcf52b7
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Communicator/UnityRLCapabilities.cs
@@ -0,0 +1,54 @@
+using UnityEngine;
+
+namespace Unity.MLAgents
+{
+ public class UnityRLCapabilities
+ {
+ public bool BaseRLCapabilities;
+ public bool ConcatenatedPngObservations;
+ public bool CompressedChannelMapping;
+ public bool HybridActions;
+ public bool TrainingAnalytics;
+ public bool VariableLengthObservation;
+ public bool MultiAgentGroups;
+
+ /// <summary>
+ /// A class holding the capabilities flags for Reinforcement Learning across C# and the Trainer codebase. This
+ /// class will be used to inform users if and when they are using C# / Trainer features that are mismatched.
+ /// </summary>
+ public UnityRLCapabilities(
+ bool baseRlCapabilities = true,
+ bool concatenatedPngObservations = true,
+ bool compressedChannelMapping = true,
+ bool hybridActions = true,
+ bool trainingAnalytics = true,
+ bool variableLengthObservation = true,
+ bool multiAgentGroups = true)
+ {
+ BaseRLCapabilities = baseRlCapabilities;
+ ConcatenatedPngObservations = concatenatedPngObservations;
+ CompressedChannelMapping = compressedChannelMapping;
+ HybridActions = hybridActions;
+ TrainingAnalytics = trainingAnalytics;
+ VariableLengthObservation = variableLengthObservation;
+ MultiAgentGroups = multiAgentGroups;
+ }
+
+ /// <summary>
+ /// Will print a warning to the console if Python does not support base capabilities and will
+ /// return true if the warning was printed.
+ /// </summary>
+ /// <returns></returns>
+ public bool WarnOnPythonMissingBaseRLCapabilities()
+ {
+ if (BaseRLCapabilities)
+ {
+ return false;
+ }
+ Debug.LogWarning("Unity has connected to a Training process that does not support " +
+ "Base Reinforcement Learning Capabilities. Please make sure you have the" +
+ " latest training codebase installed for this version of the ML-Agents package.");
+ return true;
+ }
+ }
+}
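Every flag defaults to `true`, so a capabilities object only needs explicit arguments for features the connected trainer lacks. A sketch of the gating pattern; the flag values here are illustrative, not taken from any real trainer:

```csharp
using Unity.MLAgents;

// Hypothetical trainer that supports the base protocol but not hybrid actions.
var caps = new UnityRLCapabilities(hybridActions: false);

if (caps.WarnOnPythonMissingBaseRLCapabilities())
{
    // Base capabilities are missing: a warning was already logged,
    // and trainer-dependent features should be disabled here.
}
```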
diff --git a/com.unity.ml-agents/Runtime/Communicator/UnityRLCapabilities.cs.meta b/com.unity.ml-agents/Runtime/Communicator/UnityRLCapabilities.cs.meta
new file mode 100644
index 0000000000..6cdc57628e
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Communicator/UnityRLCapabilities.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: f95d271af72d4b75aa94d308222f79d8
+timeCreated: 1587670989
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Constants.cs b/com.unity.ml-agents/Runtime/Constants.cs
new file mode 100644
index 0000000000..4be9eba042
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Constants.cs
@@ -0,0 +1,12 @@
+namespace Unity.MLAgents
+{
+ /// <summary>
+ /// Grouping for use in AddComponentMenu (instead of nesting the menus).
+ /// </summary>
+ internal enum MenuGroup
+ {
+ Default = 0,
+ Sensors = 50,
+ Actuators = 100
+ }
+}
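`MenuGroup` feeds the second argument of Unity's `AddComponentMenu` attribute, which orders entries by index so related components cluster without submenu nesting; `DecisionRequester` and `DemonstrationRecorder` below use it this way. A sketch for a hypothetical component (the enum is `internal`, so this only compiles inside the package assembly; the component name is made up):

```csharp
using UnityEngine;

namespace Unity.MLAgents
{
    // Hypothetical component; the cast to int supplies AddComponentMenu's ordering index.
    [AddComponentMenu("ML Agents/My Example Sensor", (int)MenuGroup.Sensors)]
    internal class MyExampleSensorComponent : MonoBehaviour
    {
    }
}
```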
diff --git a/com.unity.ml-agents/Runtime/Constants.cs.meta b/com.unity.ml-agents/Runtime/Constants.cs.meta
new file mode 100644
index 0000000000..f963ba55aa
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Constants.cs.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 0622d88401ec464d9d2cf2fb03ce17b5
+timeCreated: 1579215785
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/DecisionRequester.cs b/com.unity.ml-agents/Runtime/DecisionRequester.cs
new file mode 100644
index 0000000000..49590c7be4
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/DecisionRequester.cs
@@ -0,0 +1,123 @@
+using System;
+using UnityEngine;
+using UnityEngine.Serialization;
+
+namespace Unity.MLAgents
+{
+ /// <summary>
+ /// The DecisionRequester component automatically requests decisions for an
+ /// <see cref="Agent"/> instance at regular intervals.
+ /// </summary>
+ /// <remarks>
+ /// Attach a DecisionRequester component to the same [GameObject] as the
+ /// <see cref="Agent"/> component.
+ ///
+ /// The DecisionRequester component provides a convenient and flexible way to
+ /// trigger the agent decision making process. Without a DecisionRequester,
+ /// your <see cref="Agent"/> implementation must manually call its
+ /// <seealso cref="Agent.RequestDecision"/> function.
+ /// </remarks>
+ [AddComponentMenu("ML Agents/Decision Requester", (int)MenuGroup.Default)]
+ [RequireComponent(typeof(Agent))]
+ [DefaultExecutionOrder(-10)]
+ public class DecisionRequester : MonoBehaviour
+ {
+ /// <summary>
+ /// The frequency with which the agent requests a decision. A DecisionPeriod of 5 means
+ /// that the Agent will request a decision every 5 Academy steps.
+ /// </summary>
+ [Range(1, 20)]
+ [Tooltip("The frequency with which the agent requests a decision. A DecisionPeriod " +
+ "of 5 means that the Agent will request a decision every 5 Academy steps.")]
+ public int DecisionPeriod = 5;
+
+ /// <summary>
+ /// Indicates whether or not the agent will take an action during the Academy steps where
+ /// it does not request a decision. Has no effect when DecisionPeriod is set to 1.
+ /// </summary>
+ [Tooltip("Indicates whether or not the agent will take an action during the Academy " +
+ "steps where it does not request a decision. Has no effect when DecisionPeriod " +
+ "is set to 1.")]
+ [FormerlySerializedAs("RepeatAction")]
+ public bool TakeActionsBetweenDecisions = true;
+
+ [NonSerialized]
+ Agent m_Agent;
+
+ /// <summary>
+ /// Get the Agent attached to the DecisionRequester.
+ /// </summary>
+ public Agent Agent
+ {
+ get => m_Agent;
+ }
+
+ internal void Awake()
+ {
+ m_Agent = gameObject.GetComponent<Agent>();
+ Debug.Assert(m_Agent != null, "Agent component was not found on this gameObject and is required.");
+ Academy.Instance.AgentPreStep += MakeRequests;
+ }
+
+ void OnDestroy()
+ {
+ if (Academy.IsInitialized)
+ {
+ Academy.Instance.AgentPreStep -= MakeRequests;
+ }
+ }
+
+ /// <summary>
+ /// Information about Academy step used to make decisions about whether to request a decision.
+ /// </summary>
+ public struct DecisionRequestContext
+ {
+ /// <summary>
+ /// The current step count of the Academy, equivalent to Academy.StepCount.
+ /// </summary>
+ public int AcademyStepCount;
+ }
+
+ /// <summary>
+ /// Method that hooks into the Academy in order to inform the Agent on whether or not it should request a
+ /// decision, and whether or not it should take actions between decisions.
+ /// </summary>
+ /// <param name="academyStepCount">The current step count of the academy.</param>
+ void MakeRequests(int academyStepCount)
+ {
+ var context = new DecisionRequestContext
+ {
+ AcademyStepCount = academyStepCount
+ };
+
+ if (ShouldRequestDecision(context))
+ {
+ m_Agent?.RequestDecision();
+ }
+
+ if (ShouldRequestAction(context))
+ {
+ m_Agent?.RequestAction();
+ }
+ }
+
+ /// <summary>
+ /// Whether Agent.RequestDecision should be called on this update step.
+ /// </summary>
+ /// <param name="context"></param>
+ /// <returns></returns>
+ protected virtual bool ShouldRequestDecision(DecisionRequestContext context)
+ {
+ return context.AcademyStepCount % DecisionPeriod == 0;
+ }
+
+ /// <summary>
+ /// Whether Agent.RequestAction should be called on this update step.
+ /// </summary>
+ /// <param name="context"></param>
+ /// <returns></returns>
+ protected virtual bool ShouldRequestAction(DecisionRequestContext context)
+ {
+ return TakeActionsBetweenDecisions;
+ }
+ }
+}
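Because `ShouldRequestDecision` and `ShouldRequestAction` are `protected virtual`, the decision cadence can be customized by subclassing rather than by calling `Agent.RequestDecision` manually. A hypothetical subclass that gates requests behind a flag; the `GatedDecisionRequester` name and `DecisionsEnabled` field are illustrative only:

```csharp
using Unity.MLAgents;

// Hypothetical subclass: keeps the DecisionPeriod cadence but lets game code
// pause decision requests entirely by toggling a flag.
public class GatedDecisionRequester : DecisionRequester
{
    public bool DecisionsEnabled = true;

    protected override bool ShouldRequestDecision(DecisionRequestContext context)
    {
        // Keep the base period check, but gate it behind the flag.
        return DecisionsEnabled && base.ShouldRequestDecision(context);
    }

    protected override bool ShouldRequestAction(DecisionRequestContext context)
    {
        return DecisionsEnabled && base.ShouldRequestAction(context);
    }
}
```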
diff --git a/com.unity.ml-agents/Runtime/DecisionRequester.cs.meta b/com.unity.ml-agents/Runtime/DecisionRequester.cs.meta
new file mode 100644
index 0000000000..bdc416b94b
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/DecisionRequester.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 3a5c9d521e5ef4759a8246a07d52221e
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Demonstrations.meta b/com.unity.ml-agents/Runtime/Demonstrations.meta
new file mode 100644
index 0000000000..85288b5325
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Demonstrations.meta
@@ -0,0 +1,8 @@
+fileFormatVersion: 2
+guid: 85e02c21d231b4f5fa0c5f87e5f907a2
+folderAsset: yes
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationMetaData.cs b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationMetaData.cs
new file mode 100644
index 0000000000..42f67733df
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationMetaData.cs
@@ -0,0 +1,20 @@
+using System;
+using UnityEngine.Serialization;
+
+namespace Unity.MLAgents.Demonstrations
+{
+ /// <summary>
+ /// Demonstration meta-data.
+ /// Kept in a struct for easy serialization and deserialization.
+ /// </summary>
+ [Serializable]
+ internal class DemonstrationMetaData
+ {
+ [FormerlySerializedAs("numberExperiences")]
+ public int numberSteps;
+ public int numberEpisodes;
+ public float meanReward;
+ public string demonstrationName;
+ public const int ApiVersion = 1;
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationMetaData.cs.meta b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationMetaData.cs.meta
new file mode 100644
index 0000000000..8e6ff39275
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationMetaData.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: af5f3b4258a2d4ead90e733f30cfaa7a
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs
new file mode 100644
index 0000000000..b6daeace32
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs
@@ -0,0 +1,228 @@
+using System.IO.Abstractions;
+using System.Text.RegularExpressions;
+using UnityEngine;
+using System.IO;
+using Unity.MLAgents.Policies;
+using UnityEngine.Serialization;
+
+namespace Unity.MLAgents.Demonstrations
+{
+ /// <summary>
+ /// The Demonstration Recorder component facilitates the recording of demonstrations
+ /// used for imitation learning.
+ /// </summary>
+ /// <remarks>
+ /// Add this component to the [GameObject] containing an <see cref="Agent"/>
+ /// to enable recording the agent for imitation learning. You must implement the
+ /// <see cref="Agent.Heuristic"/> function of the agent to provide manual control
+ /// in order to record demonstrations.
+ ///
+ /// See [Imitation Learning - Recording Demonstrations] for more information.
+ ///
+ /// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
+ /// [Imitation Learning - Recording Demonstrations]: https://github.com/Unity-Technologies/ml-agents/blob/release_19_docs/docs//Learning-Environment-Design-Agents.md#recording-demonstrations
+ /// </remarks>
+ [RequireComponent(typeof(Agent))]
+ [AddComponentMenu("ML Agents/Demonstration Recorder", (int)MenuGroup.Default)]
+ public class DemonstrationRecorder : MonoBehaviour
+ {
+ /// <summary>
+ /// Whether or not to record demonstrations.
+ /// </summary>
+ [FormerlySerializedAs("record")]
+ [Tooltip("Whether or not to record demonstrations.")]
+ public bool Record;
+
+ /// <summary>
+ /// Number of steps to record. The editor will stop playing when it reaches this threshold.
+ /// Set to zero to record indefinitely.
+ /// </summary>
+ [Tooltip("Number of steps to record. The editor will stop playing when it reaches this threshold. " +
+ "Set to zero to record indefinitely.")]
+ public int NumStepsToRecord;
+
+ /// <summary>
+ /// Base demonstration file name. If multiple files are saved, the additional filenames
+ /// will have a sequence of unique numbers appended.
+ /// </summary>
+ [FormerlySerializedAs("demonstrationName")]
+ [Tooltip("Base demonstration file name. If multiple files are saved, the additional " +
+ "filenames will have a unique number appended.")]
+ public string DemonstrationName;
+
+ /// <summary>
+ /// Directory to save the demo files. Will default to a "Demonstrations/" folder in the
+ /// Application data path if not specified.
+ /// </summary>
+ [FormerlySerializedAs("demonstrationDirectory")]
+ [Tooltip("Directory to save the demo files. Will default to " +
+ "{Application.dataPath}/Demonstrations if not specified.")]
+ public string DemonstrationDirectory;
+
+ DemonstrationWriter m_DemoWriter;
+ internal const int MaxNameLength = 16;
+
+ const string k_ExtensionType = ".demo";
+ const string k_DefaultDirectoryName = "Demonstrations";
+ IFileSystem m_FileSystem;
+
+ Agent m_Agent;
+
+ void OnEnable()
+ {
+ m_Agent = GetComponent<Agent>();
+ }
+
+ void Update()
+ {
+ if (!Record)
+ {
+ return;
+ }
+
+ LazyInitialize();
+
+ // Quit when num steps to record is reached
+ if (NumStepsToRecord > 0 && m_DemoWriter.NumSteps >= NumStepsToRecord)
+ {
+ Application.Quit(0);
+#if UNITY_EDITOR
+ UnityEditor.EditorApplication.isPlaying = false;
+#endif
+ }
+ }
+
+ /// <summary>
+ /// Creates demonstration store for use in recording.
+ /// Has no effect if the demonstration store was already created.
+ /// </summary>
+ internal DemonstrationWriter LazyInitialize(IFileSystem fileSystem = null)
+ {
+ if (m_DemoWriter != null)
+ {
+ return m_DemoWriter;
+ }
+
+ if (m_Agent == null)
+ {
+ m_Agent = GetComponent<Agent>();
+ }
+
+ m_FileSystem = fileSystem ?? new FileSystem();
+ var behaviorParams = GetComponent<BehaviorParameters>();
+ if (string.IsNullOrEmpty(DemonstrationName))
+ {
+ DemonstrationName = behaviorParams.BehaviorName;
+ }
+ if (string.IsNullOrEmpty(DemonstrationDirectory))
+ {
+ DemonstrationDirectory = Path.Combine(Application.dataPath, k_DefaultDirectoryName);
+ }
+
+ DemonstrationName = SanitizeName(DemonstrationName, MaxNameLength);
+ var filePath = MakeDemonstrationFilePath(m_FileSystem, DemonstrationDirectory, DemonstrationName);
+ var stream = m_FileSystem.File.Create(filePath);
+ m_DemoWriter = new DemonstrationWriter(stream);
+
+ AddDemonstrationWriterToAgent(m_DemoWriter);
+
+ return m_DemoWriter;
+ }
+
+ /// <summary>
+ /// Removes all characters except alphanumerics from demonstration name.
+ /// Shortens the name if it is longer than the maxNameLength.
+ /// </summary>
+ internal static string SanitizeName(string demoName, int maxNameLength)
+ {
+ var rgx = new Regex("[^a-zA-Z0-9 -]");
+ demoName = rgx.Replace(demoName, "");
+ // If the string is too long, it will overflow the metadata.
+ if (demoName.Length > maxNameLength)
+ {
+ demoName = demoName.Substring(0, maxNameLength);
+ }
+ return demoName;
+ }
+
+ /// <summary>
+ /// Gets a unique path for the DemonstrationName in the DemonstrationDirectory.
+ /// </summary>
+ /// <param name="fileSystem"></param>
+ /// <param name="demonstrationDirectory"></param>
+ /// <param name="demonstrationName"></param>
+ /// <returns></returns>
+ internal static string MakeDemonstrationFilePath(
+ IFileSystem fileSystem, string demonstrationDirectory, string demonstrationName
+ )
+ {
+ // Create the directory if it doesn't already exist
+ if (!fileSystem.Directory.Exists(demonstrationDirectory))
+ {
+ fileSystem.Directory.CreateDirectory(demonstrationDirectory);
+ }
+
+ var literalName = demonstrationName;
+ var filePath = Path.Combine(demonstrationDirectory, literalName + k_ExtensionType);
+ var uniqueNameCounter = 0;
+ while (fileSystem.File.Exists(filePath))
+ {
+ // TODO should we use a timestamp instead of a counter here? This loops an increasing number of times
+ // as the number of demos increases.
+ literalName = demonstrationName + "_" + uniqueNameCounter;
+ filePath = Path.Combine(demonstrationDirectory, literalName + k_ExtensionType);
+ uniqueNameCounter++;
+ }
+
+ return filePath;
+ }
+
+ /// <summary>
+ /// Close the DemonstrationWriter and remove it from the Agent.
+ /// Has no effect if the DemonstrationWriter is already closed (or wasn't opened).
+ /// </summary>
+ public void Close()
+ {
+ if (m_DemoWriter != null)
+ {
+ RemoveDemonstrationWriterFromAgent(m_DemoWriter);
+
+ m_DemoWriter.Close();
+ m_DemoWriter = null;
+ }
+ }
+
+ /// <summary>
+ /// Clean up the DemonstrationWriter when shutting down or destroying the Agent.
+ /// </summary>
+ void OnDestroy()
+ {
+ Close();
+ }
+
+ /// <summary>
+ /// Add an additional DemonstrationWriter to the Agent. It is still up to the user to Close this
+ /// DemonstrationWriter when recording is done.
+ /// </summary>
+ /// <param name="demoWriter"></param>
+ public void AddDemonstrationWriterToAgent(DemonstrationWriter demoWriter)
+ {
+ var behaviorParams = GetComponent<BehaviorParameters>();
+ demoWriter.Initialize(
+ DemonstrationName,
+ behaviorParams.BrainParameters,
+ behaviorParams.FullyQualifiedBehaviorName
+ );
+ m_Agent.DemonstrationWriters.Add(demoWriter);
+ }
+
+ /// <summary>
+ /// Remove an additional DemonstrationWriter from the Agent. It is still up to the user to Close this
+ /// DemonstrationWriter when recording is done.
+ /// </summary>
+ /// <param name="demoWriter"></param>
+ public void RemoveDemonstrationWriterFromAgent(DemonstrationWriter demoWriter)
+ {
+ m_Agent.DemonstrationWriters.Remove(demoWriter);
+ }
+ }
+}
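`SanitizeName` strips everything but alphanumerics, spaces, and hyphens and then truncates to `MaxNameLength` (16), while `MakeDemonstrationFilePath` appends `_0`, `_1`, ... until it finds an unused path. A sketch of the resulting names; these members are `internal`, so this assumes test-assembly access, and `MockFileSystem` from the System.IO.Abstractions.TestingHelpers package stands in for the real file system:

```csharp
using System.IO.Abstractions.TestingHelpers;
using Unity.MLAgents.Demonstrations;

var fs = new MockFileSystem();

var name = DemonstrationRecorder.SanitizeName(
    "My Agent!@# Demo 2024 extra", DemonstrationRecorder.MaxNameLength);
// name == "My Agent Demo 20": punctuation removed, then truncated to 16 characters.

var first = DemonstrationRecorder.MakeDemonstrationFilePath(fs, "Assets/Demonstrations", name);
fs.File.Create(first).Dispose();

var second = DemonstrationRecorder.MakeDemonstrationFilePath(fs, "Assets/Demonstrations", name);
// second ends in "My Agent Demo 20_0.demo" because the first path already exists.
```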
diff --git a/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs.meta b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs.meta
new file mode 100644
index 0000000000..cde4db8f20
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: f2902496c0120472b90269f94a0aec7e
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationSummary.cs b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationSummary.cs
new file mode 100644
index 0000000000..cb32409913
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationSummary.cs
@@ -0,0 +1,37 @@
+using System;
+using System.Collections.Generic;
+using UnityEngine;
+using Unity.MLAgents.Policies;
+
+namespace Unity.MLAgents.Demonstrations
+{
+ /// <summary>
+ /// Summary of a loaded Demonstration file. Only used for display in the Inspector.
+ /// </summary>
+ [Serializable]
+ internal class DemonstrationSummary : ScriptableObject
+ {
+ public DemonstrationMetaData metaData;
+ public BrainParameters brainParameters;
+ public List<ObservationSummary> observationSummaries;
+
+ public void Initialize(BrainParameters brainParams,
+ DemonstrationMetaData demonstrationMetaData, List<ObservationSummary> obsSummaries)
+ {
+ brainParameters = brainParams;
+ metaData = demonstrationMetaData;
+ observationSummaries = obsSummaries;
+ }
+ }
+
+
+ /// <summary>
+ /// Summary of a loaded Observation. Currently only contains the shape of the Observation.
+ /// </summary>
+ /// <remarks>This is necessary because serialization doesn't support nested containers or arrays.</remarks>
+ [Serializable]
+ internal struct ObservationSummary
+ {
+ public int[] shape;
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationSummary.cs.meta b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationSummary.cs.meta
new file mode 100644
index 0000000000..91e53800d5
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationSummary.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: a5e0cbcbc514b473399c262dd37541ea
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationWriter.cs b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationWriter.cs
new file mode 100644
index 0000000000..c29bec2c40
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationWriter.cs
@@ -0,0 +1,161 @@
+using System.IO;
+using Google.Protobuf;
+using System.Collections.Generic;
+using Unity.MLAgents.Sensors;
+using Unity.MLAgents.Policies;
+
+namespace Unity.MLAgents.Demonstrations
+{
+ /// <summary>
+ /// Responsible for writing demonstration data to stream (typically a file stream).
+ /// </summary>
+ /// <seealso cref="DemonstrationRecorder"/>
+ public class DemonstrationWriter
+ {
+ /// <summary>
+ /// Number of bytes reserved for the <see cref="DemonstrationMetaData"/> at the start of the demo file.
+ /// </summary>
+ internal const int MetaDataBytes = 32;
+
+ DemonstrationMetaData m_MetaData;
+ Stream m_Writer;
+ float m_CumulativeReward;
+ ObservationWriter m_ObservationWriter = new ObservationWriter();
+
+ /// <summary>
+ /// Create a DemonstrationWriter that will write to the specified stream.
+ /// The stream must support writes and seeking.
+ /// </summary>
+ /// <param name="stream"></param>
+ public DemonstrationWriter(Stream stream)
+ {
+ m_Writer = stream;
+ }
+
+ /// <summary>
+ /// Number of steps written so far.
+ /// </summary>
+ internal int NumSteps
+ {
+ get { return m_MetaData.numberSteps; }
+ }
+
+ /// <summary>
+ /// Writes the initial data to the stream.
+ /// </summary>
+ /// <param name="demonstrationName">Base name of the demonstration file(s).</param>
+ /// <param name="brainName">The name of the Brain the agent is attached to.</param>
+ /// <param name="brainParameters">The parameters of the Brain the agent is attached to.</param>
+ internal void Initialize(
+ string demonstrationName, BrainParameters brainParameters, string brainName)
+ {
+ if (m_Writer == null)
+ {
+ // Already closed
+ return;
+ }
+
+ m_MetaData = new DemonstrationMetaData { demonstrationName = demonstrationName };
+ var metaProto = m_MetaData.ToProto();
+ metaProto.WriteDelimitedTo(m_Writer);
+
+ WriteBrainParameters(brainName, brainParameters);
+ }
+
+ /// <summary>
+ /// Writes meta-data. Note that this is called at the *end* of recording, but writes to the
+ /// beginning of the file.
+ /// </summary>
+ void WriteMetadata()
+ {
+ if (m_Writer == null)
+ {
+ // Already closed
+ return;
+ }
+
+ var metaProto = m_MetaData.ToProto();
+ var metaProtoBytes = metaProto.ToByteArray();
+ m_Writer.Write(metaProtoBytes, 0, metaProtoBytes.Length);
+ m_Writer.Seek(0, 0);
+ metaProto.WriteDelimitedTo(m_Writer);
+ }
+
+ /// <summary>
+ /// Writes brain parameters to file.
+ /// </summary>
+ /// <param name="brainName">The name of the Brain the agent is attached to.</param>
+ /// <param name="brainParameters">The parameters of the Brain the agent is attached to.</param>
+ void WriteBrainParameters(string brainName, BrainParameters brainParameters)
+ {
+ if (m_Writer == null)
+ {
+ // Already closed
+ return;
+ }
+
+ // Writes BrainParameters to file.
+ m_Writer.Seek(MetaDataBytes + 1, 0);
+ var brainProto = brainParameters.ToProto(brainName, false);
+ brainProto.WriteDelimitedTo(m_Writer);
+ }
+
+ /// <summary>
+ /// Write AgentInfo experience to file.
+ /// </summary>
+ /// <param name="info"><see cref="AgentInfo"/> for the agent being recorded.</param>
+ /// <param name="sensors">List of sensors to record for the agent.</param>
+ internal void Record(AgentInfo info, List<ISensor> sensors)
+ {
+ if (m_Writer == null)
+ {
+ // Already closed
+ return;
+ }
+
+ // Increment meta-data counters.
+ m_MetaData.numberSteps++;
+ m_CumulativeReward += info.reward;
+ if (info.done)
+ {
+ EndEpisode();
+ }
+
+ // Generate observations and add AgentInfo to file.
+ var agentProto = info.ToInfoActionPairProto();
+ foreach (var sensor in sensors)
+ {
+ agentProto.AgentInfo.Observations.Add(sensor.GetObservationProto(m_ObservationWriter));
+ }
+
+ agentProto.WriteDelimitedTo(m_Writer);
+ }
+
+
+ /// <summary>
+ /// Performs all clean-up necessary.
+ /// </summary>
+ public void Close()
+ {
+ if (m_Writer == null)
+ {
+ // Already closed
+ return;
+ }
+
+ EndEpisode();
+ m_MetaData.meanReward = m_CumulativeReward / m_MetaData.numberEpisodes;
+ WriteMetadata();
+ m_Writer.Close();
+ m_Writer = null;
+ }
+
+ /// <summary>
+ /// Performs necessary episode-completion steps.
+ /// </summary>
+ void EndEpisode()
+ {
+ m_MetaData.numberEpisodes += 1;
+ }
+ }
+}
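Because `DemonstrationWriter` accepts any seekable, writable `Stream`, recordings are not limited to files, and the recorder's Add/Remove hooks shown above wire an extra writer into the same Agent. A sketch with an in-memory target; the `MemoryStream` choice and the `recorder` variable are assumptions for illustration:

```csharp
using System.IO;
using Unity.MLAgents.Demonstrations;

// "recorder" is a DemonstrationRecorder already attached to the Agent's GameObject.
var stream = new MemoryStream();                 // seekable and writable, as required
var writer = new DemonstrationWriter(stream);

recorder.AddDemonstrationWriterToAgent(writer);  // calls writer.Initialize(...) internally
// ... play; every recorded step is serialized into the stream ...
recorder.RemoveDemonstrationWriterFromAgent(writer);
writer.Close();                                  // seeks back and finalizes the metadata block
```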
diff --git a/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationWriter.cs.meta b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationWriter.cs.meta
new file mode 100644
index 0000000000..f30f1b22c1
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationWriter.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: ebaf7878a8cc74ee3aae07daf9e1b6f2
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/EnvironmentParameters.cs b/com.unity.ml-agents/Runtime/EnvironmentParameters.cs
new file mode 100644
index 0000000000..fc1c667cd6
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/EnvironmentParameters.cs
@@ -0,0 +1,70 @@
+using System;
+using System.Collections.Generic;
+using Unity.MLAgents.SideChannels;
+
+namespace Unity.MLAgents
+{
+ /// <summary>
+ /// A container for the Environment Parameters that may be modified during training.
+ /// The keys for those parameters are defined in the trainer configurations and
+ /// the values are generated from the training process in features such as Curriculum Learning
+ /// and Environment Parameter Randomization.
+ /// </summary>
+ /// <remarks>
+ /// One current assumption for all the environment parameters is that they are of type float.
+ /// </remarks>
+ public sealed class EnvironmentParameters
+ {
+ /// <summary>
+ /// The side channel that is used to receive the new parameter values.
+ /// </summary>
+ readonly EnvironmentParametersChannel m_Channel;
+
+ /// <summary>
+ /// Constructor.
+ /// </summary>
+ internal EnvironmentParameters()
+ {
+ m_Channel = new EnvironmentParametersChannel();
+ SideChannelManager.RegisterSideChannel(m_Channel);
+ }
+
+ /// <summary>
+ /// Returns the parameter value for the specified key. Returns the default value provided
+ /// if this parameter key does not have a value. Only returns a parameter value if it is
+ /// of type float.
+ /// </summary>
+ /// <param name="key">The parameter key</param>
+ /// <param name="defaultValue">Default value for this parameter.</param>
+ /// <returns></returns>
+ public float GetWithDefault(string key, float defaultValue)
+ {
+ return m_Channel.GetWithDefault(key, defaultValue);
+ }
+
+ /// <summary>
+ /// Registers a callback action for the provided parameter key. Will overwrite any
+ /// existing action for that parameter. The callback will be called whenever the parameter
+ /// receives a value from the training process.
+ /// </summary>
+ /// <param name="key">The parameter key</param>
+ /// <param name="action">The callback action</param>
+ public void RegisterCallback(string key, Action<float> action)
+ {
+ m_Channel.RegisterCallback(key, action);
+ }
+
+ /// <summary>
+ /// Returns a list of all the parameter keys that have received values.
+ /// </summary>
+ /// <returns>List of parameter keys.</returns>
+ public IList<string> Keys()
+ {
+ return m_Channel.ListParameters();
+ }
+
+ internal void Dispose()
+ {
+ SideChannelManager.UnregisterSideChannel(m_Channel);
+ }
+ }
+}
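In practice this class is reached through `Academy.Instance.EnvironmentParameters`, and agents typically re-read values at episode start so curriculum updates take effect on the next reset. A sketch; the "scale" key and its default are illustrative, since real keys come from the trainer configuration:

```csharp
using Unity.MLAgents;
using UnityEngine;

public class ScaledAgent : Agent
{
    public override void OnEpisodeBegin()
    {
        var envParams = Academy.Instance.EnvironmentParameters;
        // Falls back to 1.0f until the trainer sends a value for "scale".
        var scale = envParams.GetWithDefault("scale", 1.0f);
        transform.localScale = Vector3.one * scale;
    }
}
```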
diff --git a/com.unity.ml-agents/Runtime/EnvironmentParameters.cs.meta b/com.unity.ml-agents/Runtime/EnvironmentParameters.cs.meta
new file mode 100644
index 0000000000..9e7a85f810
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/EnvironmentParameters.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 90ce0b26bef35484890eac0633b85eed
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/EpisodeIdCounter.cs b/com.unity.ml-agents/Runtime/EpisodeIdCounter.cs
new file mode 100644
index 0000000000..735c7fff96
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/EpisodeIdCounter.cs
@@ -0,0 +1,11 @@
+namespace Unity.MLAgents
+{
+ internal static class EpisodeIdCounter
+ {
+ static int s_Counter;
+ public static int GetEpisodeId()
+ {
+ return s_Counter++;
+ }
+ }
+}
diff --git a/com.unity.ml-agents/Runtime/EpisodeIdCounter.cs.meta b/com.unity.ml-agents/Runtime/EpisodeIdCounter.cs.meta
new file mode 100644
index 0000000000..c377f5004b
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/EpisodeIdCounter.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 847786b7bcf9d4817b3f3879d57517c7
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc.meta b/com.unity.ml-agents/Runtime/Grpc.meta
new file mode 100644
index 0000000000..f9d48bfc0f
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc.meta
@@ -0,0 +1,3 @@
+fileFormatVersion: 2
+guid: 418327e202c7464bb6649d025df1b539
+timeCreated: 1569444731
\ No newline at end of file
diff --git a/com.unity.ml-agents/Runtime/Grpc/AssemblyInfo.cs b/com.unity.ml-agents/Runtime/Grpc/AssemblyInfo.cs
new file mode 100644
index 0000000000..b740e05db8
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/AssemblyInfo.cs
@@ -0,0 +1,7 @@
+using System.Runtime.CompilerServices;
+
+[assembly: InternalsVisibleTo("Unity.ML-Agents")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor.Tests")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Runtime.Sensor.Tests")]
+[assembly: InternalsVisibleTo("Unity.ML-Agents.Runtime.Utils.Tests")]
diff --git a/com.unity.ml-agents/Runtime/Grpc/AssemblyInfo.cs.meta b/com.unity.ml-agents/Runtime/Grpc/AssemblyInfo.cs.meta
new file mode 100644
index 0000000000..cf7b4f0f10
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/AssemblyInfo.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 54959ce8e2e574f09b91f80a516acee3
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects.meta b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects.meta
new file mode 100644
index 0000000000..cef92044c3
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects.meta
@@ -0,0 +1,8 @@
+fileFormatVersion: 2
+guid: 7ebeef5df83b74a048b7f99681672f3b
+folderAsset: yes
+DefaultImporter:
+ externalObjects: {}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentAction.cs b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentAction.cs
new file mode 100644
index 0000000000..3eb0a357a2
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentAction.cs
@@ -0,0 +1,242 @@
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: mlagents_envs/communicator_objects/agent_action.proto
+// </auto-generated>
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Unity.MLAgents.CommunicatorObjects {
+
+ /// Holder for reflection information generated from mlagents_envs/communicator_objects/agent_action.proto
+ internal static partial class AgentActionReflection {
+
+ #region Descriptor
+ /// File descriptor for mlagents_envs/communicator_objects/agent_action.proto
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static AgentActionReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "CjVtbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2FnZW50X2Fj",
+ "dGlvbi5wcm90bxIUY29tbXVuaWNhdG9yX29iamVjdHMijAEKEEFnZW50QWN0",
+ "aW9uUHJvdG8SIQoZdmVjdG9yX2FjdGlvbnNfZGVwcmVjYXRlZBgBIAMoAhIN",
+ "CgV2YWx1ZRgEIAEoAhIaChJjb250aW51b3VzX2FjdGlvbnMYBiADKAISGAoQ",
+ "ZGlzY3JldGVfYWN0aW9ucxgHIAMoBUoECAIQA0oECAMQBEoECAUQBkIlqgIi",
+ "VW5pdHkuTUxBZ2VudHMuQ29tbXVuaWNhdG9yT2JqZWN0c2IGcHJvdG8z"));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { },
+ new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+ new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.AgentActionProto), global::Unity.MLAgents.CommunicatorObjects.AgentActionProto.Parser, new[]{ "VectorActionsDeprecated", "Value", "ContinuousActions", "DiscreteActions" }, null, null, null)
+ }));
+ }
+ #endregion
+
+ }
+ #region Messages
+ internal sealed partial class AgentActionProto : pb::IMessage<AgentActionProto> {
+ private static readonly pb::MessageParser<AgentActionProto> _parser = new pb::MessageParser<AgentActionProto>(() => new AgentActionProto());
+ private pb::UnknownFieldSet _unknownFields;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<AgentActionProto> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Unity.MLAgents.CommunicatorObjects.AgentActionReflection.Descriptor.MessageTypes[0]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public AgentActionProto() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public AgentActionProto(AgentActionProto other) : this() {
+ vectorActionsDeprecated_ = other.vectorActionsDeprecated_.Clone();
+ value_ = other.value_;
+ continuousActions_ = other.continuousActions_.Clone();
+ discreteActions_ = other.discreteActions_.Clone();
+ _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public AgentActionProto Clone() {
+ return new AgentActionProto(this);
+ }
+
+ /// Field number for the "vector_actions_deprecated" field.
+ public const int VectorActionsDeprecatedFieldNumber = 1;
+ private static readonly pb::FieldCodec<float> _repeated_vectorActionsDeprecated_codec
+ = pb::FieldCodec.ForFloat(10);
+ private readonly pbc::RepeatedField<float> vectorActionsDeprecated_ = new pbc::RepeatedField<float>();
+ /// <summary>
+ /// mark as deprecated in communicator v1.3.0
+ /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public pbc::RepeatedField<float> VectorActionsDeprecated {
+ get { return vectorActionsDeprecated_; }
+ }
+
+ /// Field number for the "value" field.
+ public const int ValueFieldNumber = 4;
+ private float value_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public float Value {
+ get { return value_; }
+ set {
+ value_ = value;
+ }
+ }
+
+ /// Field number for the "continuous_actions" field.
+ public const int ContinuousActionsFieldNumber = 6;
+ private static readonly pb::FieldCodec<float> _repeated_continuousActions_codec
+ = pb::FieldCodec.ForFloat(50);
+ private readonly pbc::RepeatedField<float> continuousActions_ = new pbc::RepeatedField<float>();
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public pbc::RepeatedField<float> ContinuousActions {
+ get { return continuousActions_; }
+ }
+
+ /// Field number for the "discrete_actions" field.
+ public const int DiscreteActionsFieldNumber = 7;
+ private static readonly pb::FieldCodec<int> _repeated_discreteActions_codec
+ = pb::FieldCodec.ForInt32(58);
+ private readonly pbc::RepeatedField<int> discreteActions_ = new pbc::RepeatedField<int>();
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public pbc::RepeatedField<int> DiscreteActions {
+ get { return discreteActions_; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as AgentActionProto);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(AgentActionProto other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if(!vectorActionsDeprecated_.Equals(other.vectorActionsDeprecated_)) return false;
+ if (!pbc::ProtobufEqualityComparers.BitwiseSingleEqualityComparer.Equals(Value, other.Value)) return false;
+ if(!continuousActions_.Equals(other.continuousActions_)) return false;
+ if(!discreteActions_.Equals(other.discreteActions_)) return false;
+ return Equals(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ hash ^= vectorActionsDeprecated_.GetHashCode();
+ if (Value != 0F) hash ^= pbc::ProtobufEqualityComparers.BitwiseSingleEqualityComparer.GetHashCode(Value);
+ hash ^= continuousActions_.GetHashCode();
+ hash ^= discreteActions_.GetHashCode();
+ if (_unknownFields != null) {
+ hash ^= _unknownFields.GetHashCode();
+ }
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ vectorActionsDeprecated_.WriteTo(output, _repeated_vectorActionsDeprecated_codec);
+ if (Value != 0F) {
+ output.WriteRawTag(37);
+ output.WriteFloat(Value);
+ }
+ continuousActions_.WriteTo(output, _repeated_continuousActions_codec);
+ discreteActions_.WriteTo(output, _repeated_discreteActions_codec);
+ if (_unknownFields != null) {
+ _unknownFields.WriteTo(output);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ size += vectorActionsDeprecated_.CalculateSize(_repeated_vectorActionsDeprecated_codec);
+ if (Value != 0F) {
+ size += 1 + 4;
+ }
+ size += continuousActions_.CalculateSize(_repeated_continuousActions_codec);
+ size += discreteActions_.CalculateSize(_repeated_discreteActions_codec);
+ if (_unknownFields != null) {
+ size += _unknownFields.CalculateSize();
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(AgentActionProto other) {
+ if (other == null) {
+ return;
+ }
+ vectorActionsDeprecated_.Add(other.vectorActionsDeprecated_);
+ if (other.Value != 0F) {
+ Value = other.Value;
+ }
+ continuousActions_.Add(other.continuousActions_);
+ discreteActions_.Add(other.discreteActions_);
+ _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
+ break;
+ case 10:
+ case 13: {
+ vectorActionsDeprecated_.AddEntriesFrom(input, _repeated_vectorActionsDeprecated_codec);
+ break;
+ }
+ case 37: {
+ Value = input.ReadFloat();
+ break;
+ }
+ case 50:
+ case 53: {
+ continuousActions_.AddEntriesFrom(input, _repeated_continuousActions_codec);
+ break;
+ }
+ case 58:
+ case 56: {
+ discreteActions_.AddEntriesFrom(input, _repeated_discreteActions_codec);
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ #endregion
+
+}
+
+#endregion Designer generated code
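The generated message follows the usual protobuf C# surface: repeated fields are exposed as `RepeatedField<T>` collections and the static `Parser` drives `MergeFrom`. A round-trip sketch; the types are `internal`, so this assumes code inside the package or one of the friend assemblies listed in AssemblyInfo.cs above, and the field values are arbitrary:

```csharp
using Google.Protobuf;
using Unity.MLAgents.CommunicatorObjects;

var action = new AgentActionProto();
action.ContinuousActions.Add(0.5f);
action.DiscreteActions.Add(2);

byte[] bytes = action.ToByteArray();                   // uses WriteTo/CalculateSize
var parsed = AgentActionProto.Parser.ParseFrom(bytes); // uses MergeFrom
// parsed.Equals(action) is true; unrecognized fields would be kept in _unknownFields.
```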
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentAction.cs.meta b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentAction.cs.meta
new file mode 100644
index 0000000000..f47d94375b
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentAction.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: b1fa94db54b734224927bb4b322227cd
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfo.cs b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfo.cs
new file mode 100644
index 0000000000..187f2fdab7
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfo.cs
@@ -0,0 +1,361 @@
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: mlagents_envs/communicator_objects/agent_info.proto
+// </auto-generated>
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Unity.MLAgents.CommunicatorObjects {
+
+ /// Holder for reflection information generated from mlagents_envs/communicator_objects/agent_info.proto
+ internal static partial class AgentInfoReflection {
+
+ #region Descriptor
+ /// File descriptor for mlagents_envs/communicator_objects/agent_info.proto
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static AgentInfoReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "CjNtbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2FnZW50X2lu",
+ "Zm8ucHJvdG8SFGNvbW11bmljYXRvcl9vYmplY3RzGjRtbGFnZW50c19lbnZz",
+ "L2NvbW11bmljYXRvcl9vYmplY3RzL29ic2VydmF0aW9uLnByb3RvIvkBCg5B",
+ "Z2VudEluZm9Qcm90bxIOCgZyZXdhcmQYByABKAISDAoEZG9uZRgIIAEoCBIY",
+ "ChBtYXhfc3RlcF9yZWFjaGVkGAkgASgIEgoKAmlkGAogASgFEhMKC2FjdGlv",
+ "bl9tYXNrGAsgAygIEjwKDG9ic2VydmF0aW9ucxgNIAMoCzImLmNvbW11bmlj",
+ "YXRvcl9vYmplY3RzLk9ic2VydmF0aW9uUHJvdG8SEAoIZ3JvdXBfaWQYDiAB",
+ "KAUSFAoMZ3JvdXBfcmV3YXJkGA8gASgCSgQIARACSgQIAhADSgQIAxAESgQI",
+ "BBAFSgQIBRAGSgQIBhAHSgQIDBANQiWqAiJVbml0eS5NTEFnZW50cy5Db21t",
+ "dW5pY2F0b3JPYmplY3RzYgZwcm90bzM="));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { global::Unity.MLAgents.CommunicatorObjects.ObservationReflection.Descriptor, },
+ new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+ new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.AgentInfoProto), global::Unity.MLAgents.CommunicatorObjects.AgentInfoProto.Parser, new[]{ "Reward", "Done", "MaxStepReached", "Id", "ActionMask", "Observations", "GroupId", "GroupReward" }, null, null, null)
+ }));
+ }
+ #endregion
+
+ }
+ #region Messages
+ internal sealed partial class AgentInfoProto : pb::IMessage<AgentInfoProto> {
+ private static readonly pb::MessageParser<AgentInfoProto> _parser = new pb::MessageParser<AgentInfoProto>(() => new AgentInfoProto());
+ private pb::UnknownFieldSet _unknownFields;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<AgentInfoProto> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Unity.MLAgents.CommunicatorObjects.AgentInfoReflection.Descriptor.MessageTypes[0]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public AgentInfoProto() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public AgentInfoProto(AgentInfoProto other) : this() {
+ reward_ = other.reward_;
+ done_ = other.done_;
+ maxStepReached_ = other.maxStepReached_;
+ id_ = other.id_;
+ actionMask_ = other.actionMask_.Clone();
+ observations_ = other.observations_.Clone();
+ groupId_ = other.groupId_;
+ groupReward_ = other.groupReward_;
+ _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public AgentInfoProto Clone() {
+ return new AgentInfoProto(this);
+ }
+
+ /// Field number for the "reward" field.
+ public const int RewardFieldNumber = 7;
+ private float reward_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public float Reward {
+ get { return reward_; }
+ set {
+ reward_ = value;
+ }
+ }
+
+ /// Field number for the "done" field.
+ public const int DoneFieldNumber = 8;
+ private bool done_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Done {
+ get { return done_; }
+ set {
+ done_ = value;
+ }
+ }
+
+ /// Field number for the "max_step_reached" field.
+ public const int MaxStepReachedFieldNumber = 9;
+ private bool maxStepReached_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool MaxStepReached {
+ get { return maxStepReached_; }
+ set {
+ maxStepReached_ = value;
+ }
+ }
+
+ /// Field number for the "id" field.
+ public const int IdFieldNumber = 10;
+ private int id_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int Id {
+ get { return id_; }
+ set {
+ id_ = value;
+ }
+ }
+
+ /// Field number for the "action_mask" field.
+ public const int ActionMaskFieldNumber = 11;
+    private static readonly pb::FieldCodec<bool> _repeated_actionMask_codec
+        = pb::FieldCodec.ForBool(90);
+    private readonly pbc::RepeatedField<bool> actionMask_ = new pbc::RepeatedField<bool>();
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public pbc::RepeatedField<bool> ActionMask {
+ get { return actionMask_; }
+ }
+
+ /// Field number for the "observations" field.
+ public const int ObservationsFieldNumber = 13;
+    private static readonly pb::FieldCodec<global::Unity.MLAgents.CommunicatorObjects.ObservationProto> _repeated_observations_codec
+        = pb::FieldCodec.ForMessage(106, global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Parser);
+    private readonly pbc::RepeatedField<global::Unity.MLAgents.CommunicatorObjects.ObservationProto> observations_ = new pbc::RepeatedField<global::Unity.MLAgents.CommunicatorObjects.ObservationProto>();
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public pbc::RepeatedField<global::Unity.MLAgents.CommunicatorObjects.ObservationProto> Observations {
+ get { return observations_; }
+ }
+
+ /// Field number for the "group_id" field.
+ public const int GroupIdFieldNumber = 14;
+ private int groupId_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int GroupId {
+ get { return groupId_; }
+ set {
+ groupId_ = value;
+ }
+ }
+
+ /// Field number for the "group_reward" field.
+ public const int GroupRewardFieldNumber = 15;
+ private float groupReward_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public float GroupReward {
+ get { return groupReward_; }
+ set {
+ groupReward_ = value;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as AgentInfoProto);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(AgentInfoProto other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (!pbc::ProtobufEqualityComparers.BitwiseSingleEqualityComparer.Equals(Reward, other.Reward)) return false;
+ if (Done != other.Done) return false;
+ if (MaxStepReached != other.MaxStepReached) return false;
+ if (Id != other.Id) return false;
+ if(!actionMask_.Equals(other.actionMask_)) return false;
+ if(!observations_.Equals(other.observations_)) return false;
+ if (GroupId != other.GroupId) return false;
+ if (!pbc::ProtobufEqualityComparers.BitwiseSingleEqualityComparer.Equals(GroupReward, other.GroupReward)) return false;
+ return Equals(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (Reward != 0F) hash ^= pbc::ProtobufEqualityComparers.BitwiseSingleEqualityComparer.GetHashCode(Reward);
+ if (Done != false) hash ^= Done.GetHashCode();
+ if (MaxStepReached != false) hash ^= MaxStepReached.GetHashCode();
+ if (Id != 0) hash ^= Id.GetHashCode();
+ hash ^= actionMask_.GetHashCode();
+ hash ^= observations_.GetHashCode();
+ if (GroupId != 0) hash ^= GroupId.GetHashCode();
+ if (GroupReward != 0F) hash ^= pbc::ProtobufEqualityComparers.BitwiseSingleEqualityComparer.GetHashCode(GroupReward);
+ if (_unknownFields != null) {
+ hash ^= _unknownFields.GetHashCode();
+ }
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (Reward != 0F) {
+ output.WriteRawTag(61);
+ output.WriteFloat(Reward);
+ }
+ if (Done != false) {
+ output.WriteRawTag(64);
+ output.WriteBool(Done);
+ }
+ if (MaxStepReached != false) {
+ output.WriteRawTag(72);
+ output.WriteBool(MaxStepReached);
+ }
+ if (Id != 0) {
+ output.WriteRawTag(80);
+ output.WriteInt32(Id);
+ }
+ actionMask_.WriteTo(output, _repeated_actionMask_codec);
+ observations_.WriteTo(output, _repeated_observations_codec);
+ if (GroupId != 0) {
+ output.WriteRawTag(112);
+ output.WriteInt32(GroupId);
+ }
+ if (GroupReward != 0F) {
+ output.WriteRawTag(125);
+ output.WriteFloat(GroupReward);
+ }
+ if (_unknownFields != null) {
+ _unknownFields.WriteTo(output);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (Reward != 0F) {
+ size += 1 + 4;
+ }
+ if (Done != false) {
+ size += 1 + 1;
+ }
+ if (MaxStepReached != false) {
+ size += 1 + 1;
+ }
+ if (Id != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(Id);
+ }
+ size += actionMask_.CalculateSize(_repeated_actionMask_codec);
+ size += observations_.CalculateSize(_repeated_observations_codec);
+ if (GroupId != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(GroupId);
+ }
+ if (GroupReward != 0F) {
+ size += 1 + 4;
+ }
+ if (_unknownFields != null) {
+ size += _unknownFields.CalculateSize();
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(AgentInfoProto other) {
+ if (other == null) {
+ return;
+ }
+ if (other.Reward != 0F) {
+ Reward = other.Reward;
+ }
+ if (other.Done != false) {
+ Done = other.Done;
+ }
+ if (other.MaxStepReached != false) {
+ MaxStepReached = other.MaxStepReached;
+ }
+ if (other.Id != 0) {
+ Id = other.Id;
+ }
+ actionMask_.Add(other.actionMask_);
+ observations_.Add(other.observations_);
+ if (other.GroupId != 0) {
+ GroupId = other.GroupId;
+ }
+ if (other.GroupReward != 0F) {
+ GroupReward = other.GroupReward;
+ }
+ _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
+ break;
+ case 61: {
+ Reward = input.ReadFloat();
+ break;
+ }
+ case 64: {
+ Done = input.ReadBool();
+ break;
+ }
+ case 72: {
+ MaxStepReached = input.ReadBool();
+ break;
+ }
+ case 80: {
+ Id = input.ReadInt32();
+ break;
+ }
+ case 90:
+ case 88: {
+ actionMask_.AddEntriesFrom(input, _repeated_actionMask_codec);
+ break;
+ }
+ case 106: {
+ observations_.AddEntriesFrom(input, _repeated_observations_codec);
+ break;
+ }
+ case 112: {
+ GroupId = input.ReadInt32();
+ break;
+ }
+ case 125: {
+ GroupReward = input.ReadFloat();
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ #endregion
+
+}
+
+#endregion Designer generated code
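The raw tag values hard-coded in `AgentInfoProto.WriteTo` and `MergeFrom` above follow directly from the protobuf wire format: a tag is `(field_number << 3) | wire_type`, with wire type 0 = varint, 2 = length-delimited, 5 = fixed32. A minimal sketch of the arithmetic (the helper class is illustrative, not part of the generated file):

```csharp
internal static class WireTagExample
{
    // Protobuf tag encoding: field number in the high bits, wire type in the low 3 bits.
    static uint Tag(int fieldNumber, int wireType) => (uint)((fieldNumber << 3) | wireType);

    internal static void Check()
    {
        System.Diagnostics.Debug.Assert(Tag(7, 5) == 61);   // "reward": float, fixed32
        System.Diagnostics.Debug.Assert(Tag(8, 0) == 64);   // "done": bool, varint
        System.Diagnostics.Debug.Assert(Tag(11, 2) == 90);  // "action_mask": packed repeated bool
        System.Diagnostics.Debug.Assert(Tag(11, 0) == 88);  // same field unpacked, hence both case labels
        System.Diagnostics.Debug.Assert(Tag(13, 2) == 106); // "observations": embedded messages
        System.Diagnostics.Debug.Assert(Tag(15, 5) == 125); // "group_reward": float, fixed32
    }
}
```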
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfo.cs.meta b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfo.cs.meta
new file mode 100644
index 0000000000..07ed361456
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfo.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: ecaddd3a8141a4854a4d2c7fe8bd6a75
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfoActionPair.cs b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfoActionPair.cs
new file mode 100644
index 0000000000..37cd219c73
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfoActionPair.cs
@@ -0,0 +1,219 @@
+// <auto-generated>
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: mlagents_envs/communicator_objects/agent_info_action_pair.proto
+// </auto-generated>
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Unity.MLAgents.CommunicatorObjects {
+
+ /// Holder for reflection information generated from mlagents_envs/communicator_objects/agent_info_action_pair.proto
+ internal static partial class AgentInfoActionPairReflection {
+
+ #region Descriptor
+ /// File descriptor for mlagents_envs/communicator_objects/agent_info_action_pair.proto
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static AgentInfoActionPairReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "Cj9tbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2FnZW50X2lu",
+ "Zm9fYWN0aW9uX3BhaXIucHJvdG8SFGNvbW11bmljYXRvcl9vYmplY3RzGjNt",
+ "bGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2FnZW50X2luZm8u",
+ "cHJvdG8aNW1sYWdlbnRzX2VudnMvY29tbXVuaWNhdG9yX29iamVjdHMvYWdl",
+ "bnRfYWN0aW9uLnByb3RvIpEBChhBZ2VudEluZm9BY3Rpb25QYWlyUHJvdG8S",
+ "OAoKYWdlbnRfaW5mbxgBIAEoCzIkLmNvbW11bmljYXRvcl9vYmplY3RzLkFn",
+ "ZW50SW5mb1Byb3RvEjsKC2FjdGlvbl9pbmZvGAIgASgLMiYuY29tbXVuaWNh",
+ "dG9yX29iamVjdHMuQWdlbnRBY3Rpb25Qcm90b0IlqgIiVW5pdHkuTUxBZ2Vu",
+ "dHMuQ29tbXVuaWNhdG9yT2JqZWN0c2IGcHJvdG8z"));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { global::Unity.MLAgents.CommunicatorObjects.AgentInfoReflection.Descriptor, global::Unity.MLAgents.CommunicatorObjects.AgentActionReflection.Descriptor, },
+ new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+ new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.AgentInfoActionPairProto), global::Unity.MLAgents.CommunicatorObjects.AgentInfoActionPairProto.Parser, new[]{ "AgentInfo", "ActionInfo" }, null, null, null)
+ }));
+ }
+ #endregion
+
+ }
+ #region Messages
+  internal sealed partial class AgentInfoActionPairProto : pb::IMessage<AgentInfoActionPairProto> {
+    private static readonly pb::MessageParser<AgentInfoActionPairProto> _parser = new pb::MessageParser<AgentInfoActionPairProto>(() => new AgentInfoActionPairProto());
+ private pb::UnknownFieldSet _unknownFields;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<AgentInfoActionPairProto> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Unity.MLAgents.CommunicatorObjects.AgentInfoActionPairReflection.Descriptor.MessageTypes[0]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public AgentInfoActionPairProto() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public AgentInfoActionPairProto(AgentInfoActionPairProto other) : this() {
+ AgentInfo = other.agentInfo_ != null ? other.AgentInfo.Clone() : null;
+ ActionInfo = other.actionInfo_ != null ? other.ActionInfo.Clone() : null;
+ _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public AgentInfoActionPairProto Clone() {
+ return new AgentInfoActionPairProto(this);
+ }
+
+ /// Field number for the "agent_info" field.
+ public const int AgentInfoFieldNumber = 1;
+ private global::Unity.MLAgents.CommunicatorObjects.AgentInfoProto agentInfo_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public global::Unity.MLAgents.CommunicatorObjects.AgentInfoProto AgentInfo {
+ get { return agentInfo_; }
+ set {
+ agentInfo_ = value;
+ }
+ }
+
+ /// Field number for the "action_info" field.
+ public const int ActionInfoFieldNumber = 2;
+ private global::Unity.MLAgents.CommunicatorObjects.AgentActionProto actionInfo_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public global::Unity.MLAgents.CommunicatorObjects.AgentActionProto ActionInfo {
+ get { return actionInfo_; }
+ set {
+ actionInfo_ = value;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as AgentInfoActionPairProto);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(AgentInfoActionPairProto other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (!object.Equals(AgentInfo, other.AgentInfo)) return false;
+ if (!object.Equals(ActionInfo, other.ActionInfo)) return false;
+ return Equals(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (agentInfo_ != null) hash ^= AgentInfo.GetHashCode();
+ if (actionInfo_ != null) hash ^= ActionInfo.GetHashCode();
+ if (_unknownFields != null) {
+ hash ^= _unknownFields.GetHashCode();
+ }
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (agentInfo_ != null) {
+ output.WriteRawTag(10);
+ output.WriteMessage(AgentInfo);
+ }
+ if (actionInfo_ != null) {
+ output.WriteRawTag(18);
+ output.WriteMessage(ActionInfo);
+ }
+ if (_unknownFields != null) {
+ _unknownFields.WriteTo(output);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (agentInfo_ != null) {
+ size += 1 + pb::CodedOutputStream.ComputeMessageSize(AgentInfo);
+ }
+ if (actionInfo_ != null) {
+ size += 1 + pb::CodedOutputStream.ComputeMessageSize(ActionInfo);
+ }
+ if (_unknownFields != null) {
+ size += _unknownFields.CalculateSize();
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(AgentInfoActionPairProto other) {
+ if (other == null) {
+ return;
+ }
+ if (other.agentInfo_ != null) {
+ if (agentInfo_ == null) {
+ agentInfo_ = new global::Unity.MLAgents.CommunicatorObjects.AgentInfoProto();
+ }
+ AgentInfo.MergeFrom(other.AgentInfo);
+ }
+ if (other.actionInfo_ != null) {
+ if (actionInfo_ == null) {
+ actionInfo_ = new global::Unity.MLAgents.CommunicatorObjects.AgentActionProto();
+ }
+ ActionInfo.MergeFrom(other.ActionInfo);
+ }
+ _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
+ break;
+ case 10: {
+ if (agentInfo_ == null) {
+ agentInfo_ = new global::Unity.MLAgents.CommunicatorObjects.AgentInfoProto();
+ }
+ input.ReadMessage(agentInfo_);
+ break;
+ }
+ case 18: {
+ if (actionInfo_ == null) {
+ actionInfo_ = new global::Unity.MLAgents.CommunicatorObjects.AgentActionProto();
+ }
+ input.ReadMessage(actionInfo_);
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ #endregion
+
+}
+
+#endregion Designer generated code
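For context, a minimal round-trip sketch using the generated types above. It assumes code compiled into the same assembly (the messages are `internal`), and the field values are illustrative:

```csharp
using Google.Protobuf;
using Unity.MLAgents.CommunicatorObjects;

internal static class ProtoRoundTripExample
{
    internal static void Run()
    {
        var pair = new AgentInfoActionPairProto
        {
            AgentInfo = new AgentInfoProto { Reward = 1.0f, Id = 42 }
        };
        byte[] bytes = pair.ToByteArray();                            // drives WriteTo over a CodedOutputStream
        var copy = AgentInfoActionPairProto.Parser.ParseFrom(bytes);  // drives MergeFrom over a CodedInputStream
        System.Diagnostics.Debug.Assert(pair.Equals(copy));           // field-wise Equals defined above
    }
}
```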
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfoActionPair.cs.meta b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfoActionPair.cs.meta
new file mode 100644
index 0000000000..7474dcae69
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/AgentInfoActionPair.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 29577366657494c678558b0643abcb30
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/BrainParameters.cs b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/BrainParameters.cs
new file mode 100644
index 0000000000..65b57f4ea3
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/BrainParameters.cs
@@ -0,0 +1,524 @@
+// <auto-generated>
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: mlagents_envs/communicator_objects/brain_parameters.proto
+// </auto-generated>
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Unity.MLAgents.CommunicatorObjects {
+
+ /// Holder for reflection information generated from mlagents_envs/communicator_objects/brain_parameters.proto
+ internal static partial class BrainParametersReflection {
+
+ #region Descriptor
+ /// File descriptor for mlagents_envs/communicator_objects/brain_parameters.proto
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static BrainParametersReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "CjltbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2JyYWluX3Bh",
+ "cmFtZXRlcnMucHJvdG8SFGNvbW11bmljYXRvcl9vYmplY3RzGjNtbGFnZW50",
+ "c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL3NwYWNlX3R5cGUucHJvdG8i",
+ "iwEKD0FjdGlvblNwZWNQcm90bxIeChZudW1fY29udGludW91c19hY3Rpb25z",
+ "GAEgASgFEhwKFG51bV9kaXNjcmV0ZV9hY3Rpb25zGAIgASgFEh0KFWRpc2Ny",
+ "ZXRlX2JyYW5jaF9zaXplcxgDIAMoBRIbChNhY3Rpb25fZGVzY3JpcHRpb25z",
+ "GAQgAygJIrYCChRCcmFpblBhcmFtZXRlcnNQcm90bxIlCh12ZWN0b3JfYWN0",
+ "aW9uX3NpemVfZGVwcmVjYXRlZBgDIAMoBRItCiV2ZWN0b3JfYWN0aW9uX2Rl",
+ "c2NyaXB0aW9uc19kZXByZWNhdGVkGAUgAygJElEKI3ZlY3Rvcl9hY3Rpb25f",
+ "c3BhY2VfdHlwZV9kZXByZWNhdGVkGAYgASgOMiQuY29tbXVuaWNhdG9yX29i",
+ "amVjdHMuU3BhY2VUeXBlUHJvdG8SEgoKYnJhaW5fbmFtZRgHIAEoCRITCgtp",
+ "c190cmFpbmluZxgIIAEoCBI6CgthY3Rpb25fc3BlYxgJIAEoCzIlLmNvbW11",
+ "bmljYXRvcl9vYmplY3RzLkFjdGlvblNwZWNQcm90b0oECAEQAkoECAIQA0oE",
+ "CAQQBUIlqgIiVW5pdHkuTUxBZ2VudHMuQ29tbXVuaWNhdG9yT2JqZWN0c2IG",
+ "cHJvdG8z"));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { global::Unity.MLAgents.CommunicatorObjects.SpaceTypeReflection.Descriptor, },
+ new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+ new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.ActionSpecProto), global::Unity.MLAgents.CommunicatorObjects.ActionSpecProto.Parser, new[]{ "NumContinuousActions", "NumDiscreteActions", "DiscreteBranchSizes", "ActionDescriptions" }, null, null, null),
+ new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.BrainParametersProto), global::Unity.MLAgents.CommunicatorObjects.BrainParametersProto.Parser, new[]{ "VectorActionSizeDeprecated", "VectorActionDescriptionsDeprecated", "VectorActionSpaceTypeDeprecated", "BrainName", "IsTraining", "ActionSpec" }, null, null, null)
+ }));
+ }
+ #endregion
+
+ }
+ #region Messages
+  internal sealed partial class ActionSpecProto : pb::IMessage<ActionSpecProto> {
+    private static readonly pb::MessageParser<ActionSpecProto> _parser = new pb::MessageParser<ActionSpecProto>(() => new ActionSpecProto());
+ private pb::UnknownFieldSet _unknownFields;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<ActionSpecProto> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Unity.MLAgents.CommunicatorObjects.BrainParametersReflection.Descriptor.MessageTypes[0]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ActionSpecProto() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ActionSpecProto(ActionSpecProto other) : this() {
+ numContinuousActions_ = other.numContinuousActions_;
+ numDiscreteActions_ = other.numDiscreteActions_;
+ discreteBranchSizes_ = other.discreteBranchSizes_.Clone();
+ actionDescriptions_ = other.actionDescriptions_.Clone();
+ _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ActionSpecProto Clone() {
+ return new ActionSpecProto(this);
+ }
+
+ /// Field number for the "num_continuous_actions" field.
+ public const int NumContinuousActionsFieldNumber = 1;
+ private int numContinuousActions_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int NumContinuousActions {
+ get { return numContinuousActions_; }
+ set {
+ numContinuousActions_ = value;
+ }
+ }
+
+ /// Field number for the "num_discrete_actions" field.
+ public const int NumDiscreteActionsFieldNumber = 2;
+ private int numDiscreteActions_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int NumDiscreteActions {
+ get { return numDiscreteActions_; }
+ set {
+ numDiscreteActions_ = value;
+ }
+ }
+
+ /// Field number for the "discrete_branch_sizes" field.
+ public const int DiscreteBranchSizesFieldNumber = 3;
+    private static readonly pb::FieldCodec<int> _repeated_discreteBranchSizes_codec
+        = pb::FieldCodec.ForInt32(26);
+    private readonly pbc::RepeatedField<int> discreteBranchSizes_ = new pbc::RepeatedField<int>();
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public pbc::RepeatedField<int> DiscreteBranchSizes {
+ get { return discreteBranchSizes_; }
+ }
+
+ /// Field number for the "action_descriptions" field.
+ public const int ActionDescriptionsFieldNumber = 4;
+    private static readonly pb::FieldCodec<string> _repeated_actionDescriptions_codec
+        = pb::FieldCodec.ForString(34);
+    private readonly pbc::RepeatedField<string> actionDescriptions_ = new pbc::RepeatedField<string>();
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public pbc::RepeatedField<string> ActionDescriptions {
+ get { return actionDescriptions_; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as ActionSpecProto);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(ActionSpecProto other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (NumContinuousActions != other.NumContinuousActions) return false;
+ if (NumDiscreteActions != other.NumDiscreteActions) return false;
+ if(!discreteBranchSizes_.Equals(other.discreteBranchSizes_)) return false;
+ if(!actionDescriptions_.Equals(other.actionDescriptions_)) return false;
+ return Equals(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (NumContinuousActions != 0) hash ^= NumContinuousActions.GetHashCode();
+ if (NumDiscreteActions != 0) hash ^= NumDiscreteActions.GetHashCode();
+ hash ^= discreteBranchSizes_.GetHashCode();
+ hash ^= actionDescriptions_.GetHashCode();
+ if (_unknownFields != null) {
+ hash ^= _unknownFields.GetHashCode();
+ }
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (NumContinuousActions != 0) {
+ output.WriteRawTag(8);
+ output.WriteInt32(NumContinuousActions);
+ }
+ if (NumDiscreteActions != 0) {
+ output.WriteRawTag(16);
+ output.WriteInt32(NumDiscreteActions);
+ }
+ discreteBranchSizes_.WriteTo(output, _repeated_discreteBranchSizes_codec);
+ actionDescriptions_.WriteTo(output, _repeated_actionDescriptions_codec);
+ if (_unknownFields != null) {
+ _unknownFields.WriteTo(output);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (NumContinuousActions != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(NumContinuousActions);
+ }
+ if (NumDiscreteActions != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(NumDiscreteActions);
+ }
+ size += discreteBranchSizes_.CalculateSize(_repeated_discreteBranchSizes_codec);
+ size += actionDescriptions_.CalculateSize(_repeated_actionDescriptions_codec);
+ if (_unknownFields != null) {
+ size += _unknownFields.CalculateSize();
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(ActionSpecProto other) {
+ if (other == null) {
+ return;
+ }
+ if (other.NumContinuousActions != 0) {
+ NumContinuousActions = other.NumContinuousActions;
+ }
+ if (other.NumDiscreteActions != 0) {
+ NumDiscreteActions = other.NumDiscreteActions;
+ }
+ discreteBranchSizes_.Add(other.discreteBranchSizes_);
+ actionDescriptions_.Add(other.actionDescriptions_);
+ _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
+ break;
+ case 8: {
+ NumContinuousActions = input.ReadInt32();
+ break;
+ }
+ case 16: {
+ NumDiscreteActions = input.ReadInt32();
+ break;
+ }
+ case 26:
+ case 24: {
+ discreteBranchSizes_.AddEntriesFrom(input, _repeated_discreteBranchSizes_codec);
+ break;
+ }
+ case 34: {
+ actionDescriptions_.AddEntriesFrom(input, _repeated_actionDescriptions_codec);
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+  internal sealed partial class BrainParametersProto : pb::IMessage<BrainParametersProto> {
+    private static readonly pb::MessageParser<BrainParametersProto> _parser = new pb::MessageParser<BrainParametersProto>(() => new BrainParametersProto());
+ private pb::UnknownFieldSet _unknownFields;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<BrainParametersProto> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Unity.MLAgents.CommunicatorObjects.BrainParametersReflection.Descriptor.MessageTypes[1]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public BrainParametersProto() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public BrainParametersProto(BrainParametersProto other) : this() {
+ vectorActionSizeDeprecated_ = other.vectorActionSizeDeprecated_.Clone();
+ vectorActionDescriptionsDeprecated_ = other.vectorActionDescriptionsDeprecated_.Clone();
+ vectorActionSpaceTypeDeprecated_ = other.vectorActionSpaceTypeDeprecated_;
+ brainName_ = other.brainName_;
+ isTraining_ = other.isTraining_;
+ ActionSpec = other.actionSpec_ != null ? other.ActionSpec.Clone() : null;
+ _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public BrainParametersProto Clone() {
+ return new BrainParametersProto(this);
+ }
+
+ /// Field number for the "vector_action_size_deprecated" field.
+ public const int VectorActionSizeDeprecatedFieldNumber = 3;
+    private static readonly pb::FieldCodec<int> _repeated_vectorActionSizeDeprecated_codec
+        = pb::FieldCodec.ForInt32(26);
+    private readonly pbc::RepeatedField<int> vectorActionSizeDeprecated_ = new pbc::RepeatedField<int>();
+    /// <summary>
+    /// mark as deprecated in communicator v1.3.0
+    /// </summary>
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public pbc::RepeatedField<int> VectorActionSizeDeprecated {
+ get { return vectorActionSizeDeprecated_; }
+ }
+
+ /// Field number for the "vector_action_descriptions_deprecated" field.
+ public const int VectorActionDescriptionsDeprecatedFieldNumber = 5;
+    private static readonly pb::FieldCodec<string> _repeated_vectorActionDescriptionsDeprecated_codec
+        = pb::FieldCodec.ForString(42);
+    private readonly pbc::RepeatedField<string> vectorActionDescriptionsDeprecated_ = new pbc::RepeatedField<string>();
+    /// <summary>
+    /// mark as deprecated in communicator v1.3.0
+    /// </summary>
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public pbc::RepeatedField<string> VectorActionDescriptionsDeprecated {
+ get { return vectorActionDescriptionsDeprecated_; }
+ }
+
+ /// Field number for the "vector_action_space_type_deprecated" field.
+ public const int VectorActionSpaceTypeDeprecatedFieldNumber = 6;
+ private global::Unity.MLAgents.CommunicatorObjects.SpaceTypeProto vectorActionSpaceTypeDeprecated_ = 0;
+    /// <summary>
+    /// mark as deprecated in communicator v1.3.0
+    /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public global::Unity.MLAgents.CommunicatorObjects.SpaceTypeProto VectorActionSpaceTypeDeprecated {
+ get { return vectorActionSpaceTypeDeprecated_; }
+ set {
+ vectorActionSpaceTypeDeprecated_ = value;
+ }
+ }
+
+ /// Field number for the "brain_name" field.
+ public const int BrainNameFieldNumber = 7;
+ private string brainName_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string BrainName {
+ get { return brainName_; }
+ set {
+ brainName_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ /// Field number for the "is_training" field.
+ public const int IsTrainingFieldNumber = 8;
+ private bool isTraining_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool IsTraining {
+ get { return isTraining_; }
+ set {
+ isTraining_ = value;
+ }
+ }
+
+ /// Field number for the "action_spec" field.
+ public const int ActionSpecFieldNumber = 9;
+ private global::Unity.MLAgents.CommunicatorObjects.ActionSpecProto actionSpec_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public global::Unity.MLAgents.CommunicatorObjects.ActionSpecProto ActionSpec {
+ get { return actionSpec_; }
+ set {
+ actionSpec_ = value;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as BrainParametersProto);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(BrainParametersProto other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if(!vectorActionSizeDeprecated_.Equals(other.vectorActionSizeDeprecated_)) return false;
+ if(!vectorActionDescriptionsDeprecated_.Equals(other.vectorActionDescriptionsDeprecated_)) return false;
+ if (VectorActionSpaceTypeDeprecated != other.VectorActionSpaceTypeDeprecated) return false;
+ if (BrainName != other.BrainName) return false;
+ if (IsTraining != other.IsTraining) return false;
+ if (!object.Equals(ActionSpec, other.ActionSpec)) return false;
+ return Equals(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ hash ^= vectorActionSizeDeprecated_.GetHashCode();
+ hash ^= vectorActionDescriptionsDeprecated_.GetHashCode();
+ if (VectorActionSpaceTypeDeprecated != 0) hash ^= VectorActionSpaceTypeDeprecated.GetHashCode();
+ if (BrainName.Length != 0) hash ^= BrainName.GetHashCode();
+ if (IsTraining != false) hash ^= IsTraining.GetHashCode();
+ if (actionSpec_ != null) hash ^= ActionSpec.GetHashCode();
+ if (_unknownFields != null) {
+ hash ^= _unknownFields.GetHashCode();
+ }
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ vectorActionSizeDeprecated_.WriteTo(output, _repeated_vectorActionSizeDeprecated_codec);
+ vectorActionDescriptionsDeprecated_.WriteTo(output, _repeated_vectorActionDescriptionsDeprecated_codec);
+ if (VectorActionSpaceTypeDeprecated != 0) {
+ output.WriteRawTag(48);
+ output.WriteEnum((int) VectorActionSpaceTypeDeprecated);
+ }
+ if (BrainName.Length != 0) {
+ output.WriteRawTag(58);
+ output.WriteString(BrainName);
+ }
+ if (IsTraining != false) {
+ output.WriteRawTag(64);
+ output.WriteBool(IsTraining);
+ }
+ if (actionSpec_ != null) {
+ output.WriteRawTag(74);
+ output.WriteMessage(ActionSpec);
+ }
+ if (_unknownFields != null) {
+ _unknownFields.WriteTo(output);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ size += vectorActionSizeDeprecated_.CalculateSize(_repeated_vectorActionSizeDeprecated_codec);
+ size += vectorActionDescriptionsDeprecated_.CalculateSize(_repeated_vectorActionDescriptionsDeprecated_codec);
+ if (VectorActionSpaceTypeDeprecated != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeEnumSize((int) VectorActionSpaceTypeDeprecated);
+ }
+ if (BrainName.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(BrainName);
+ }
+ if (IsTraining != false) {
+ size += 1 + 1;
+ }
+ if (actionSpec_ != null) {
+ size += 1 + pb::CodedOutputStream.ComputeMessageSize(ActionSpec);
+ }
+ if (_unknownFields != null) {
+ size += _unknownFields.CalculateSize();
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(BrainParametersProto other) {
+ if (other == null) {
+ return;
+ }
+ vectorActionSizeDeprecated_.Add(other.vectorActionSizeDeprecated_);
+ vectorActionDescriptionsDeprecated_.Add(other.vectorActionDescriptionsDeprecated_);
+ if (other.VectorActionSpaceTypeDeprecated != 0) {
+ VectorActionSpaceTypeDeprecated = other.VectorActionSpaceTypeDeprecated;
+ }
+ if (other.BrainName.Length != 0) {
+ BrainName = other.BrainName;
+ }
+ if (other.IsTraining != false) {
+ IsTraining = other.IsTraining;
+ }
+ if (other.actionSpec_ != null) {
+ if (actionSpec_ == null) {
+ actionSpec_ = new global::Unity.MLAgents.CommunicatorObjects.ActionSpecProto();
+ }
+ ActionSpec.MergeFrom(other.ActionSpec);
+ }
+ _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
+ break;
+ case 26:
+ case 24: {
+ vectorActionSizeDeprecated_.AddEntriesFrom(input, _repeated_vectorActionSizeDeprecated_codec);
+ break;
+ }
+ case 42: {
+ vectorActionDescriptionsDeprecated_.AddEntriesFrom(input, _repeated_vectorActionDescriptionsDeprecated_codec);
+ break;
+ }
+ case 48: {
+ vectorActionSpaceTypeDeprecated_ = (global::Unity.MLAgents.CommunicatorObjects.SpaceTypeProto) input.ReadEnum();
+ break;
+ }
+ case 58: {
+ BrainName = input.ReadString();
+ break;
+ }
+ case 64: {
+ IsTraining = input.ReadBool();
+ break;
+ }
+ case 74: {
+ if (actionSpec_ == null) {
+ actionSpec_ = new global::Unity.MLAgents.CommunicatorObjects.ActionSpecProto();
+ }
+ input.ReadMessage(actionSpec_);
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ #endregion
+
+}
+
+#endregion Designer generated code
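A short sketch of how the two messages above fit together, assuming assembly-internal access; the brain name and action sizes are illustrative. New code should populate the nested `ActionSpecProto`, while the `*Deprecated` fields remain only so pre-v1.3.0 communicators can still read the message (the descriptor appears to reserve the removed fields 1, 2 and 4 entirely):

```csharp
using Unity.MLAgents.CommunicatorObjects;

internal static class BrainParametersExample
{
    internal static BrainParametersProto Make()
    {
        var brain = new BrainParametersProto
        {
            BrainName = "3DBall",   // illustrative name
            IsTraining = true,
            ActionSpec = new ActionSpecProto { NumContinuousActions = 2 }
        };
        // Repeated fields have no setter; they are mutated in place.
        brain.ActionSpec.ActionDescriptions.Add("x");
        brain.ActionSpec.ActionDescriptions.Add("z");
        return brain;
    }
}
```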
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/BrainParameters.cs.meta b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/BrainParameters.cs.meta
new file mode 100644
index 0000000000..447602fcc2
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/BrainParameters.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 26f9a93df956e4ee88c1cf5f31017f0e
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Capabilities.cs b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Capabilities.cs
new file mode 100644
index 0000000000..ac267f4c2f
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Capabilities.cs
@@ -0,0 +1,373 @@
+// <auto-generated>
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: mlagents_envs/communicator_objects/capabilities.proto
+// </auto-generated>
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Unity.MLAgents.CommunicatorObjects {
+
+ /// Holder for reflection information generated from mlagents_envs/communicator_objects/capabilities.proto
+ internal static partial class CapabilitiesReflection {
+
+ #region Descriptor
+ /// File descriptor for mlagents_envs/communicator_objects/capabilities.proto
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static CapabilitiesReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "CjVtbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2NhcGFiaWxp",
+ "dGllcy5wcm90bxIUY29tbXVuaWNhdG9yX29iamVjdHMi7AEKGFVuaXR5UkxD",
+ "YXBhYmlsaXRpZXNQcm90bxIaChJiYXNlUkxDYXBhYmlsaXRpZXMYASABKAgS",
+ "IwobY29uY2F0ZW5hdGVkUG5nT2JzZXJ2YXRpb25zGAIgASgIEiAKGGNvbXBy",
+ "ZXNzZWRDaGFubmVsTWFwcGluZxgDIAEoCBIVCg1oeWJyaWRBY3Rpb25zGAQg",
+ "ASgIEhkKEXRyYWluaW5nQW5hbHl0aWNzGAUgASgIEiEKGXZhcmlhYmxlTGVu",
+ "Z3RoT2JzZXJ2YXRpb24YBiABKAgSGAoQbXVsdGlBZ2VudEdyb3VwcxgHIAEo",
+ "CEIlqgIiVW5pdHkuTUxBZ2VudHMuQ29tbXVuaWNhdG9yT2JqZWN0c2IGcHJv",
+ "dG8z"));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { },
+ new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+ new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.UnityRLCapabilitiesProto), global::Unity.MLAgents.CommunicatorObjects.UnityRLCapabilitiesProto.Parser, new[]{ "BaseRLCapabilities", "ConcatenatedPngObservations", "CompressedChannelMapping", "HybridActions", "TrainingAnalytics", "VariableLengthObservation", "MultiAgentGroups" }, null, null, null)
+ }));
+ }
+ #endregion
+
+ }
+ #region Messages
+  /// <summary>
+  ///
+  /// A Capabilities message that will communicate to both C# and Python
+  /// what features are available to both.
+  /// </summary>
+  internal sealed partial class UnityRLCapabilitiesProto : pb::IMessage<UnityRLCapabilitiesProto> {
+    private static readonly pb::MessageParser<UnityRLCapabilitiesProto> _parser = new pb::MessageParser<UnityRLCapabilitiesProto>(() => new UnityRLCapabilitiesProto());
+ private pb::UnknownFieldSet _unknownFields;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<UnityRLCapabilitiesProto> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Unity.MLAgents.CommunicatorObjects.CapabilitiesReflection.Descriptor.MessageTypes[0]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public UnityRLCapabilitiesProto() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public UnityRLCapabilitiesProto(UnityRLCapabilitiesProto other) : this() {
+ baseRLCapabilities_ = other.baseRLCapabilities_;
+ concatenatedPngObservations_ = other.concatenatedPngObservations_;
+ compressedChannelMapping_ = other.compressedChannelMapping_;
+ hybridActions_ = other.hybridActions_;
+ trainingAnalytics_ = other.trainingAnalytics_;
+ variableLengthObservation_ = other.variableLengthObservation_;
+ multiAgentGroups_ = other.multiAgentGroups_;
+ _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public UnityRLCapabilitiesProto Clone() {
+ return new UnityRLCapabilitiesProto(this);
+ }
+
+ /// Field number for the "baseRLCapabilities" field.
+ public const int BaseRLCapabilitiesFieldNumber = 1;
+ private bool baseRLCapabilities_;
+    /// <summary>
+    /// These are the 1.0 capabilities.
+    /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool BaseRLCapabilities {
+ get { return baseRLCapabilities_; }
+ set {
+ baseRLCapabilities_ = value;
+ }
+ }
+
+ /// Field number for the "concatenatedPngObservations" field.
+ public const int ConcatenatedPngObservationsFieldNumber = 2;
+ private bool concatenatedPngObservations_;
+    /// <summary>
+    /// concatenated PNG files for compressed visual observations with >3 channels.
+    /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool ConcatenatedPngObservations {
+ get { return concatenatedPngObservations_; }
+ set {
+ concatenatedPngObservations_ = value;
+ }
+ }
+
+ /// Field number for the "compressedChannelMapping" field.
+ public const int CompressedChannelMappingFieldNumber = 3;
+ private bool compressedChannelMapping_;
+    /// <summary>
+    /// compression mapping for stacking compressed observations.
+    /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool CompressedChannelMapping {
+ get { return compressedChannelMapping_; }
+ set {
+ compressedChannelMapping_ = value;
+ }
+ }
+
+ /// Field number for the "hybridActions" field.
+ public const int HybridActionsFieldNumber = 4;
+ private bool hybridActions_;
+    /// <summary>
+    /// support for hybrid action spaces (discrete + continuous)
+    /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool HybridActions {
+ get { return hybridActions_; }
+ set {
+ hybridActions_ = value;
+ }
+ }
+
+ /// Field number for the "trainingAnalytics" field.
+ public const int TrainingAnalyticsFieldNumber = 5;
+ private bool trainingAnalytics_;
+    /// <summary>
+    /// support for training analytics
+    /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool TrainingAnalytics {
+ get { return trainingAnalytics_; }
+ set {
+ trainingAnalytics_ = value;
+ }
+ }
+
+ /// Field number for the "variableLengthObservation" field.
+ public const int VariableLengthObservationFieldNumber = 6;
+ private bool variableLengthObservation_;
+    /// <summary>
+    /// Support for variable length observations of rank 2
+    /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool VariableLengthObservation {
+ get { return variableLengthObservation_; }
+ set {
+ variableLengthObservation_ = value;
+ }
+ }
+
+ /// Field number for the "multiAgentGroups" field.
+ public const int MultiAgentGroupsFieldNumber = 7;
+ private bool multiAgentGroups_;
+    /// <summary>
+    /// Support for multi agent groups and group rewards
+    /// </summary>
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool MultiAgentGroups {
+ get { return multiAgentGroups_; }
+ set {
+ multiAgentGroups_ = value;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as UnityRLCapabilitiesProto);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(UnityRLCapabilitiesProto other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (BaseRLCapabilities != other.BaseRLCapabilities) return false;
+ if (ConcatenatedPngObservations != other.ConcatenatedPngObservations) return false;
+ if (CompressedChannelMapping != other.CompressedChannelMapping) return false;
+ if (HybridActions != other.HybridActions) return false;
+ if (TrainingAnalytics != other.TrainingAnalytics) return false;
+ if (VariableLengthObservation != other.VariableLengthObservation) return false;
+ if (MultiAgentGroups != other.MultiAgentGroups) return false;
+ return Equals(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (BaseRLCapabilities != false) hash ^= BaseRLCapabilities.GetHashCode();
+ if (ConcatenatedPngObservations != false) hash ^= ConcatenatedPngObservations.GetHashCode();
+ if (CompressedChannelMapping != false) hash ^= CompressedChannelMapping.GetHashCode();
+ if (HybridActions != false) hash ^= HybridActions.GetHashCode();
+ if (TrainingAnalytics != false) hash ^= TrainingAnalytics.GetHashCode();
+ if (VariableLengthObservation != false) hash ^= VariableLengthObservation.GetHashCode();
+ if (MultiAgentGroups != false) hash ^= MultiAgentGroups.GetHashCode();
+ if (_unknownFields != null) {
+ hash ^= _unknownFields.GetHashCode();
+ }
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (BaseRLCapabilities != false) {
+ output.WriteRawTag(8);
+ output.WriteBool(BaseRLCapabilities);
+ }
+ if (ConcatenatedPngObservations != false) {
+ output.WriteRawTag(16);
+ output.WriteBool(ConcatenatedPngObservations);
+ }
+ if (CompressedChannelMapping != false) {
+ output.WriteRawTag(24);
+ output.WriteBool(CompressedChannelMapping);
+ }
+ if (HybridActions != false) {
+ output.WriteRawTag(32);
+ output.WriteBool(HybridActions);
+ }
+ if (TrainingAnalytics != false) {
+ output.WriteRawTag(40);
+ output.WriteBool(TrainingAnalytics);
+ }
+ if (VariableLengthObservation != false) {
+ output.WriteRawTag(48);
+ output.WriteBool(VariableLengthObservation);
+ }
+ if (MultiAgentGroups != false) {
+ output.WriteRawTag(56);
+ output.WriteBool(MultiAgentGroups);
+ }
+ if (_unknownFields != null) {
+ _unknownFields.WriteTo(output);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (BaseRLCapabilities != false) {
+ size += 1 + 1;
+ }
+ if (ConcatenatedPngObservations != false) {
+ size += 1 + 1;
+ }
+ if (CompressedChannelMapping != false) {
+ size += 1 + 1;
+ }
+ if (HybridActions != false) {
+ size += 1 + 1;
+ }
+ if (TrainingAnalytics != false) {
+ size += 1 + 1;
+ }
+ if (VariableLengthObservation != false) {
+ size += 1 + 1;
+ }
+ if (MultiAgentGroups != false) {
+ size += 1 + 1;
+ }
+ if (_unknownFields != null) {
+ size += _unknownFields.CalculateSize();
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(UnityRLCapabilitiesProto other) {
+ if (other == null) {
+ return;
+ }
+ if (other.BaseRLCapabilities != false) {
+ BaseRLCapabilities = other.BaseRLCapabilities;
+ }
+ if (other.ConcatenatedPngObservations != false) {
+ ConcatenatedPngObservations = other.ConcatenatedPngObservations;
+ }
+ if (other.CompressedChannelMapping != false) {
+ CompressedChannelMapping = other.CompressedChannelMapping;
+ }
+ if (other.HybridActions != false) {
+ HybridActions = other.HybridActions;
+ }
+ if (other.TrainingAnalytics != false) {
+ TrainingAnalytics = other.TrainingAnalytics;
+ }
+ if (other.VariableLengthObservation != false) {
+ VariableLengthObservation = other.VariableLengthObservation;
+ }
+ if (other.MultiAgentGroups != false) {
+ MultiAgentGroups = other.MultiAgentGroups;
+ }
+ _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
+ break;
+ case 8: {
+ BaseRLCapabilities = input.ReadBool();
+ break;
+ }
+ case 16: {
+ ConcatenatedPngObservations = input.ReadBool();
+ break;
+ }
+ case 24: {
+ CompressedChannelMapping = input.ReadBool();
+ break;
+ }
+ case 32: {
+ HybridActions = input.ReadBool();
+ break;
+ }
+ case 40: {
+ TrainingAnalytics = input.ReadBool();
+ break;
+ }
+ case 48: {
+ VariableLengthObservation = input.ReadBool();
+ break;
+ }
+ case 56: {
+ MultiAgentGroups = input.ReadBool();
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ #endregion
+
+}
+
+#endregion Designer generated code
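One plausible use of this message, sketched under assumptions since the negotiation code itself is not part of this diff: each side sends its own `UnityRLCapabilitiesProto` during the handshake, and a feature may only be used when both sides advertise it, i.e. a field-wise AND. `Intersect` is a hypothetical helper, not an API from this file:

```csharp
using Unity.MLAgents.CommunicatorObjects;

internal static class CapabilitiesExample
{
    // Combine the C# and Python capability sets into the mutually supported set.
    internal static UnityRLCapabilitiesProto Intersect(
        UnityRLCapabilitiesProto a, UnityRLCapabilitiesProto b)
    {
        return new UnityRLCapabilitiesProto
        {
            BaseRLCapabilities = a.BaseRLCapabilities && b.BaseRLCapabilities,
            ConcatenatedPngObservations = a.ConcatenatedPngObservations && b.ConcatenatedPngObservations,
            CompressedChannelMapping = a.CompressedChannelMapping && b.CompressedChannelMapping,
            HybridActions = a.HybridActions && b.HybridActions,
            TrainingAnalytics = a.TrainingAnalytics && b.TrainingAnalytics,
            VariableLengthObservation = a.VariableLengthObservation && b.VariableLengthObservation,
            MultiAgentGroups = a.MultiAgentGroups && b.MultiAgentGroups,
        };
    }
}
```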
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Capabilities.cs.meta b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Capabilities.cs.meta
new file mode 100644
index 0000000000..1e65cf6ee3
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Capabilities.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: e8388443b440343299cab2e88988e14e
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Command.cs b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Command.cs
new file mode 100644
index 0000000000..1220f9f9ee
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Command.cs
@@ -0,0 +1,49 @@
+// <auto-generated>
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: mlagents_envs/communicator_objects/command.proto
+// </auto-generated>
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Unity.MLAgents.CommunicatorObjects {
+
+ /// Holder for reflection information generated from mlagents_envs/communicator_objects/command.proto
+ internal static partial class CommandReflection {
+
+ #region Descriptor
+ /// File descriptor for mlagents_envs/communicator_objects/command.proto
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static CommandReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "CjBtbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2NvbW1hbmQu",
+ "cHJvdG8SFGNvbW11bmljYXRvcl9vYmplY3RzKi0KDENvbW1hbmRQcm90bxII",
+ "CgRTVEVQEAASCQoFUkVTRVQQARIICgRRVUlUEAJCJaoCIlVuaXR5Lk1MQWdl",
+ "bnRzLkNvbW11bmljYXRvck9iamVjdHNiBnByb3RvMw=="));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { },
+ new pbr::GeneratedClrTypeInfo(new[] {typeof(global::Unity.MLAgents.CommunicatorObjects.CommandProto), }, null));
+ }
+ #endregion
+
+ }
+ #region Enums
+ internal enum CommandProto {
+ [pbr::OriginalName("STEP")] Step = 0,
+ [pbr::OriginalName("RESET")] Reset = 1,
+ [pbr::OriginalName("QUIT")] Quit = 2,
+ }
+
+ #endregion
+
+}
+
+#endregion Designer generated code
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Command.cs.meta b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Command.cs.meta
new file mode 100644
index 0000000000..f47033a7c1
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Command.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 9be6f5025f61540eabbc831436642adc
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/CustomResetParameters.cs b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/CustomResetParameters.cs
new file mode 100644
index 0000000000..45099b04c7
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/CustomResetParameters.cs
@@ -0,0 +1,146 @@
+// <auto-generated>
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: mlagents_envs/communicator_objects/custom_reset_parameters.proto
+// </auto-generated>
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Unity.MLAgents.CommunicatorObjects {
+
+ /// Holder for reflection information generated from mlagents_envs/communicator_objects/custom_reset_parameters.proto
+ internal static partial class CustomResetParametersReflection {
+
+ #region Descriptor
+ /// File descriptor for mlagents_envs/communicator_objects/custom_reset_parameters.proto
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static CustomResetParametersReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "CkBtbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2N1c3RvbV9y",
+ "ZXNldF9wYXJhbWV0ZXJzLnByb3RvEhRjb21tdW5pY2F0b3Jfb2JqZWN0cyIc",
+ "ChpDdXN0b21SZXNldFBhcmFtZXRlcnNQcm90b0IlqgIiVW5pdHkuTUxBZ2Vu",
+ "dHMuQ29tbXVuaWNhdG9yT2JqZWN0c2IGcHJvdG8z"));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { },
+ new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+ new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.CustomResetParametersProto), global::Unity.MLAgents.CommunicatorObjects.CustomResetParametersProto.Parser, null, null, null, null)
+ }));
+ }
+ #endregion
+
+ }
+ #region Messages
+  internal sealed partial class CustomResetParametersProto : pb::IMessage<CustomResetParametersProto> {
+    private static readonly pb::MessageParser<CustomResetParametersProto> _parser = new pb::MessageParser<CustomResetParametersProto>(() => new CustomResetParametersProto());
+ private pb::UnknownFieldSet _unknownFields;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<CustomResetParametersProto> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Unity.MLAgents.CommunicatorObjects.CustomResetParametersReflection.Descriptor.MessageTypes[0]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public CustomResetParametersProto() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public CustomResetParametersProto(CustomResetParametersProto other) : this() {
+ _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public CustomResetParametersProto Clone() {
+ return new CustomResetParametersProto(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as CustomResetParametersProto);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(CustomResetParametersProto other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ return Equals(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (_unknownFields != null) {
+ hash ^= _unknownFields.GetHashCode();
+ }
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (_unknownFields != null) {
+ _unknownFields.WriteTo(output);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (_unknownFields != null) {
+ size += _unknownFields.CalculateSize();
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(CustomResetParametersProto other) {
+ if (other == null) {
+ return;
+ }
+ _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
+ break;
+ }
+ }
+ }
+
+ }
+
+ #endregion
+
+}
+
+#endregion Designer generated code
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/CustomResetParameters.cs.meta b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/CustomResetParameters.cs.meta
new file mode 100644
index 0000000000..aa357195f6
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/CustomResetParameters.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 62f03717ee98042bf8990733358f2dbd
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/DemonstrationMeta.cs b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/DemonstrationMeta.cs
new file mode 100644
index 0000000000..58f8ad8022
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/DemonstrationMeta.cs
@@ -0,0 +1,289 @@
+//
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: mlagents_envs/communicator_objects/demonstration_meta.proto
+//
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Unity.MLAgents.CommunicatorObjects {
+
+ /// Holder for reflection information generated from mlagents_envs/communicator_objects/demonstration_meta.proto
+ internal static partial class DemonstrationMetaReflection {
+
+ #region Descriptor
+ /// File descriptor for mlagents_envs/communicator_objects/demonstration_meta.proto
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static DemonstrationMetaReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "CjttbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2RlbW9uc3Ry",
+ "YXRpb25fbWV0YS5wcm90bxIUY29tbXVuaWNhdG9yX29iamVjdHMijQEKFkRl",
+ "bW9uc3RyYXRpb25NZXRhUHJvdG8SEwoLYXBpX3ZlcnNpb24YASABKAUSGgoS",
+ "ZGVtb25zdHJhdGlvbl9uYW1lGAIgASgJEhQKDG51bWJlcl9zdGVwcxgDIAEo",
+ "BRIXCg9udW1iZXJfZXBpc29kZXMYBCABKAUSEwoLbWVhbl9yZXdhcmQYBSAB",
+ "KAJCJaoCIlVuaXR5Lk1MQWdlbnRzLkNvbW11bmljYXRvck9iamVjdHNiBnBy",
+ "b3RvMw=="));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { },
+ new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+ new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.DemonstrationMetaProto), global::Unity.MLAgents.CommunicatorObjects.DemonstrationMetaProto.Parser, new[]{ "ApiVersion", "DemonstrationName", "NumberSteps", "NumberEpisodes", "MeanReward" }, null, null, null)
+ }));
+ }
+ #endregion
+
+ }
+ #region Messages
+ internal sealed partial class DemonstrationMetaProto : pb::IMessage<DemonstrationMetaProto> {
+ private static readonly pb::MessageParser<DemonstrationMetaProto> _parser = new pb::MessageParser<DemonstrationMetaProto>(() => new DemonstrationMetaProto());
+ private pb::UnknownFieldSet _unknownFields;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<DemonstrationMetaProto> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Unity.MLAgents.CommunicatorObjects.DemonstrationMetaReflection.Descriptor.MessageTypes[0]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public DemonstrationMetaProto() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public DemonstrationMetaProto(DemonstrationMetaProto other) : this() {
+ apiVersion_ = other.apiVersion_;
+ demonstrationName_ = other.demonstrationName_;
+ numberSteps_ = other.numberSteps_;
+ numberEpisodes_ = other.numberEpisodes_;
+ meanReward_ = other.meanReward_;
+ _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public DemonstrationMetaProto Clone() {
+ return new DemonstrationMetaProto(this);
+ }
+
+ /// Field number for the "api_version" field.
+ public const int ApiVersionFieldNumber = 1;
+ private int apiVersion_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int ApiVersion {
+ get { return apiVersion_; }
+ set {
+ apiVersion_ = value;
+ }
+ }
+
+ /// Field number for the "demonstration_name" field.
+ public const int DemonstrationNameFieldNumber = 2;
+ private string demonstrationName_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string DemonstrationName {
+ get { return demonstrationName_; }
+ set {
+ demonstrationName_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ /// Field number for the "number_steps" field.
+ public const int NumberStepsFieldNumber = 3;
+ private int numberSteps_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int NumberSteps {
+ get { return numberSteps_; }
+ set {
+ numberSteps_ = value;
+ }
+ }
+
+ /// Field number for the "number_episodes" field.
+ public const int NumberEpisodesFieldNumber = 4;
+ private int numberEpisodes_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int NumberEpisodes {
+ get { return numberEpisodes_; }
+ set {
+ numberEpisodes_ = value;
+ }
+ }
+
+ /// Field number for the "mean_reward" field.
+ public const int MeanRewardFieldNumber = 5;
+ private float meanReward_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public float MeanReward {
+ get { return meanReward_; }
+ set {
+ meanReward_ = value;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as DemonstrationMetaProto);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(DemonstrationMetaProto other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (ApiVersion != other.ApiVersion) return false;
+ if (DemonstrationName != other.DemonstrationName) return false;
+ if (NumberSteps != other.NumberSteps) return false;
+ if (NumberEpisodes != other.NumberEpisodes) return false;
+ if (!pbc::ProtobufEqualityComparers.BitwiseSingleEqualityComparer.Equals(MeanReward, other.MeanReward)) return false;
+ return Equals(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (ApiVersion != 0) hash ^= ApiVersion.GetHashCode();
+ if (DemonstrationName.Length != 0) hash ^= DemonstrationName.GetHashCode();
+ if (NumberSteps != 0) hash ^= NumberSteps.GetHashCode();
+ if (NumberEpisodes != 0) hash ^= NumberEpisodes.GetHashCode();
+ if (MeanReward != 0F) hash ^= pbc::ProtobufEqualityComparers.BitwiseSingleEqualityComparer.GetHashCode(MeanReward);
+ if (_unknownFields != null) {
+ hash ^= _unknownFields.GetHashCode();
+ }
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (ApiVersion != 0) {
+ output.WriteRawTag(8);
+ output.WriteInt32(ApiVersion);
+ }
+ if (DemonstrationName.Length != 0) {
+ output.WriteRawTag(18);
+ output.WriteString(DemonstrationName);
+ }
+ if (NumberSteps != 0) {
+ output.WriteRawTag(24);
+ output.WriteInt32(NumberSteps);
+ }
+ if (NumberEpisodes != 0) {
+ output.WriteRawTag(32);
+ output.WriteInt32(NumberEpisodes);
+ }
+ if (MeanReward != 0F) {
+ output.WriteRawTag(45);
+ output.WriteFloat(MeanReward);
+ }
+ if (_unknownFields != null) {
+ _unknownFields.WriteTo(output);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (ApiVersion != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(ApiVersion);
+ }
+ if (DemonstrationName.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(DemonstrationName);
+ }
+ if (NumberSteps != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(NumberSteps);
+ }
+ if (NumberEpisodes != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(NumberEpisodes);
+ }
+ if (MeanReward != 0F) {
+ size += 1 + 4;
+ }
+ if (_unknownFields != null) {
+ size += _unknownFields.CalculateSize();
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(DemonstrationMetaProto other) {
+ if (other == null) {
+ return;
+ }
+ if (other.ApiVersion != 0) {
+ ApiVersion = other.ApiVersion;
+ }
+ if (other.DemonstrationName.Length != 0) {
+ DemonstrationName = other.DemonstrationName;
+ }
+ if (other.NumberSteps != 0) {
+ NumberSteps = other.NumberSteps;
+ }
+ if (other.NumberEpisodes != 0) {
+ NumberEpisodes = other.NumberEpisodes;
+ }
+ if (other.MeanReward != 0F) {
+ MeanReward = other.MeanReward;
+ }
+ _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
+ break;
+ case 8: {
+ ApiVersion = input.ReadInt32();
+ break;
+ }
+ case 18: {
+ DemonstrationName = input.ReadString();
+ break;
+ }
+ case 24: {
+ NumberSteps = input.ReadInt32();
+ break;
+ }
+ case 32: {
+ NumberEpisodes = input.ReadInt32();
+ break;
+ }
+ case 45: {
+ MeanReward = input.ReadFloat();
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ #endregion
+
+}
+
+#endregion Designer generated code
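For orientation, a sketch of how this message behaves at runtime. The generated class is `internal`, so code like this only compiles inside the `com.unity.ml-agents` assembly, and the helper name and field values here are ours:

```csharp
using Google.Protobuf; // MessageExtensions.ToByteArray / MessageParser.ParseFrom
using Unity.MLAgents.CommunicatorObjects;

internal static class DemonstrationMetaRoundTrip // illustrative helper, not part of the package
{
    internal static void Run()
    {
        var meta = new DemonstrationMetaProto
        {
            ApiVersion = 1,                    // example values only
            DemonstrationName = "ExpertPush",
            NumberSteps = 5000,
            NumberEpisodes = 20,
            MeanReward = 0.87f,
        };

        // Round-trip through the wire format; Equals compares field by field.
        byte[] wire = meta.ToByteArray();
        DemonstrationMetaProto parsed = DemonstrationMetaProto.Parser.ParseFrom(wire);
        System.Diagnostics.Debug.Assert(meta.Equals(parsed));
    }
}
```

Note that proto3 scalars left at their defaults (`0`, `""`, `0F`) are skipped entirely on the wire, which is exactly what the `if (... != 0)` guards in `WriteTo` and `CalculateSize` above encode.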
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/DemonstrationMeta.cs.meta b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/DemonstrationMeta.cs.meta
new file mode 100644
index 0000000000..41176197e9
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/DemonstrationMeta.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 7248e2660150f4a39bb99dfabb9bae7d
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/EngineConfiguration.cs b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/EngineConfiguration.cs
new file mode 100644
index 0000000000..6a05c09f28
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/EngineConfiguration.cs
@@ -0,0 +1,317 @@
+//
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: mlagents_envs/communicator_objects/engine_configuration.proto
+//
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Unity.MLAgents.CommunicatorObjects {
+
+ /// Holder for reflection information generated from mlagents_envs/communicator_objects/engine_configuration.proto
+ internal static partial class EngineConfigurationReflection {
+
+ #region Descriptor
+ /// File descriptor for mlagents_envs/communicator_objects/engine_configuration.proto
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static EngineConfigurationReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "Cj1tbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2VuZ2luZV9j",
+ "b25maWd1cmF0aW9uLnByb3RvEhRjb21tdW5pY2F0b3Jfb2JqZWN0cyKVAQoY",
+ "RW5naW5lQ29uZmlndXJhdGlvblByb3RvEg0KBXdpZHRoGAEgASgFEg4KBmhl",
+ "aWdodBgCIAEoBRIVCg1xdWFsaXR5X2xldmVsGAMgASgFEhIKCnRpbWVfc2Nh",
+ "bGUYBCABKAISGQoRdGFyZ2V0X2ZyYW1lX3JhdGUYBSABKAUSFAoMc2hvd19t",
+ "b25pdG9yGAYgASgIQiWqAiJVbml0eS5NTEFnZW50cy5Db21tdW5pY2F0b3JP",
+ "YmplY3RzYgZwcm90bzM="));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { },
+ new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+ new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.EngineConfigurationProto), global::Unity.MLAgents.CommunicatorObjects.EngineConfigurationProto.Parser, new[]{ "Width", "Height", "QualityLevel", "TimeScale", "TargetFrameRate", "ShowMonitor" }, null, null, null)
+ }));
+ }
+ #endregion
+
+ }
+ #region Messages
+ internal sealed partial class EngineConfigurationProto : pb::IMessage<EngineConfigurationProto> {
+ private static readonly pb::MessageParser<EngineConfigurationProto> _parser = new pb::MessageParser<EngineConfigurationProto>(() => new EngineConfigurationProto());
+ private pb::UnknownFieldSet _unknownFields;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<EngineConfigurationProto> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Unity.MLAgents.CommunicatorObjects.EngineConfigurationReflection.Descriptor.MessageTypes[0]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public EngineConfigurationProto() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public EngineConfigurationProto(EngineConfigurationProto other) : this() {
+ width_ = other.width_;
+ height_ = other.height_;
+ qualityLevel_ = other.qualityLevel_;
+ timeScale_ = other.timeScale_;
+ targetFrameRate_ = other.targetFrameRate_;
+ showMonitor_ = other.showMonitor_;
+ _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public EngineConfigurationProto Clone() {
+ return new EngineConfigurationProto(this);
+ }
+
+ /// Field number for the "width" field.
+ public const int WidthFieldNumber = 1;
+ private int width_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int Width {
+ get { return width_; }
+ set {
+ width_ = value;
+ }
+ }
+
+ /// Field number for the "height" field.
+ public const int HeightFieldNumber = 2;
+ private int height_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int Height {
+ get { return height_; }
+ set {
+ height_ = value;
+ }
+ }
+
+ /// Field number for the "quality_level" field.
+ public const int QualityLevelFieldNumber = 3;
+ private int qualityLevel_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int QualityLevel {
+ get { return qualityLevel_; }
+ set {
+ qualityLevel_ = value;
+ }
+ }
+
+ /// Field number for the "time_scale" field.
+ public const int TimeScaleFieldNumber = 4;
+ private float timeScale_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public float TimeScale {
+ get { return timeScale_; }
+ set {
+ timeScale_ = value;
+ }
+ }
+
+ /// Field number for the "target_frame_rate" field.
+ public const int TargetFrameRateFieldNumber = 5;
+ private int targetFrameRate_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int TargetFrameRate {
+ get { return targetFrameRate_; }
+ set {
+ targetFrameRate_ = value;
+ }
+ }
+
+ /// Field number for the "show_monitor" field.
+ public const int ShowMonitorFieldNumber = 6;
+ private bool showMonitor_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool ShowMonitor {
+ get { return showMonitor_; }
+ set {
+ showMonitor_ = value;
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as EngineConfigurationProto);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(EngineConfigurationProto other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (Width != other.Width) return false;
+ if (Height != other.Height) return false;
+ if (QualityLevel != other.QualityLevel) return false;
+ if (!pbc::ProtobufEqualityComparers.BitwiseSingleEqualityComparer.Equals(TimeScale, other.TimeScale)) return false;
+ if (TargetFrameRate != other.TargetFrameRate) return false;
+ if (ShowMonitor != other.ShowMonitor) return false;
+ return Equals(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (Width != 0) hash ^= Width.GetHashCode();
+ if (Height != 0) hash ^= Height.GetHashCode();
+ if (QualityLevel != 0) hash ^= QualityLevel.GetHashCode();
+ if (TimeScale != 0F) hash ^= pbc::ProtobufEqualityComparers.BitwiseSingleEqualityComparer.GetHashCode(TimeScale);
+ if (TargetFrameRate != 0) hash ^= TargetFrameRate.GetHashCode();
+ if (ShowMonitor != false) hash ^= ShowMonitor.GetHashCode();
+ if (_unknownFields != null) {
+ hash ^= _unknownFields.GetHashCode();
+ }
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (Width != 0) {
+ output.WriteRawTag(8);
+ output.WriteInt32(Width);
+ }
+ if (Height != 0) {
+ output.WriteRawTag(16);
+ output.WriteInt32(Height);
+ }
+ if (QualityLevel != 0) {
+ output.WriteRawTag(24);
+ output.WriteInt32(QualityLevel);
+ }
+ if (TimeScale != 0F) {
+ output.WriteRawTag(37);
+ output.WriteFloat(TimeScale);
+ }
+ if (TargetFrameRate != 0) {
+ output.WriteRawTag(40);
+ output.WriteInt32(TargetFrameRate);
+ }
+ if (ShowMonitor != false) {
+ output.WriteRawTag(48);
+ output.WriteBool(ShowMonitor);
+ }
+ if (_unknownFields != null) {
+ _unknownFields.WriteTo(output);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (Width != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(Width);
+ }
+ if (Height != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(Height);
+ }
+ if (QualityLevel != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(QualityLevel);
+ }
+ if (TimeScale != 0F) {
+ size += 1 + 4;
+ }
+ if (TargetFrameRate != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(TargetFrameRate);
+ }
+ if (ShowMonitor != false) {
+ size += 1 + 1;
+ }
+ if (_unknownFields != null) {
+ size += _unknownFields.CalculateSize();
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(EngineConfigurationProto other) {
+ if (other == null) {
+ return;
+ }
+ if (other.Width != 0) {
+ Width = other.Width;
+ }
+ if (other.Height != 0) {
+ Height = other.Height;
+ }
+ if (other.QualityLevel != 0) {
+ QualityLevel = other.QualityLevel;
+ }
+ if (other.TimeScale != 0F) {
+ TimeScale = other.TimeScale;
+ }
+ if (other.TargetFrameRate != 0) {
+ TargetFrameRate = other.TargetFrameRate;
+ }
+ if (other.ShowMonitor != false) {
+ ShowMonitor = other.ShowMonitor;
+ }
+ _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
+ break;
+ case 8: {
+ Width = input.ReadInt32();
+ break;
+ }
+ case 16: {
+ Height = input.ReadInt32();
+ break;
+ }
+ case 24: {
+ QualityLevel = input.ReadInt32();
+ break;
+ }
+ case 37: {
+ TimeScale = input.ReadFloat();
+ break;
+ }
+ case 40: {
+ TargetFrameRate = input.ReadInt32();
+ break;
+ }
+ case 48: {
+ ShowMonitor = input.ReadBool();
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ #endregion
+
+}
+
+#endregion Designer generated code
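One behavior of the `MergeFrom(other)` implementation above worth spelling out: only fields that are non-default on the source message are copied, so merging a sparse message onto a populated one acts as a partial override. A sketch under the same assumptions as before (internal access; values ours):

```csharp
using Unity.MLAgents.CommunicatorObjects;

internal static class EngineConfigMergeExample // illustrative helper only
{
    internal static void Run()
    {
        var config = new EngineConfigurationProto
        {
            Width = 84, Height = 84, QualityLevel = 5,
            TimeScale = 20f, TargetFrameRate = -1,
        };
        // Only TimeScale is non-default here, so MergeFrom copies only it.
        var overrides = new EngineConfigurationProto { TimeScale = 1f };

        config.MergeFrom(overrides);
        // config.TimeScale is now 1f; Width, Height, QualityLevel and
        // TargetFrameRate keep their previous values.
    }
}
```

The flip side is that proto3 cannot distinguish "explicitly set to the default" from "unset", so an override of, say, `Width = 0` would be silently ignored by this merge.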
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/EngineConfiguration.cs.meta b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/EngineConfiguration.cs.meta
new file mode 100644
index 0000000000..cb08edae85
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/EngineConfiguration.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 129a5bbec69fc4f42bc70e422660c8f0
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Header.cs b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Header.cs
new file mode 100644
index 0000000000..2f38cc8f44
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Header.cs
@@ -0,0 +1,202 @@
+//
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: mlagents_envs/communicator_objects/header.proto
+//
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Unity.MLAgents.CommunicatorObjects {
+
+ /// Holder for reflection information generated from mlagents_envs/communicator_objects/header.proto
+ internal static partial class HeaderReflection {
+
+ #region Descriptor
+ /// File descriptor for mlagents_envs/communicator_objects/header.proto
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static HeaderReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "Ci9tbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2hlYWRlci5w",
+ "cm90bxIUY29tbXVuaWNhdG9yX29iamVjdHMiLgoLSGVhZGVyUHJvdG8SDgoG",
+ "c3RhdHVzGAEgASgFEg8KB21lc3NhZ2UYAiABKAlCJaoCIlVuaXR5Lk1MQWdl",
+ "bnRzLkNvbW11bmljYXRvck9iamVjdHNiBnByb3RvMw=="));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { },
+ new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+ new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.HeaderProto), global::Unity.MLAgents.CommunicatorObjects.HeaderProto.Parser, new[]{ "Status", "Message" }, null, null, null)
+ }));
+ }
+ #endregion
+
+ }
+ #region Messages
+ internal sealed partial class HeaderProto : pb::IMessage<HeaderProto> {
+ private static readonly pb::MessageParser<HeaderProto> _parser = new pb::MessageParser<HeaderProto>(() => new HeaderProto());
+ private pb::UnknownFieldSet _unknownFields;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<HeaderProto> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Unity.MLAgents.CommunicatorObjects.HeaderReflection.Descriptor.MessageTypes[0]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public HeaderProto() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public HeaderProto(HeaderProto other) : this() {
+ status_ = other.status_;
+ message_ = other.message_;
+ _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public HeaderProto Clone() {
+ return new HeaderProto(this);
+ }
+
+ /// Field number for the "status" field.
+ public const int StatusFieldNumber = 1;
+ private int status_;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int Status {
+ get { return status_; }
+ set {
+ status_ = value;
+ }
+ }
+
+ /// Field number for the "message" field.
+ public const int MessageFieldNumber = 2;
+ private string message_ = "";
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public string Message {
+ get { return message_; }
+ set {
+ message_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override bool Equals(object other) {
+ return Equals(other as HeaderProto);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public bool Equals(HeaderProto other) {
+ if (ReferenceEquals(other, null)) {
+ return false;
+ }
+ if (ReferenceEquals(other, this)) {
+ return true;
+ }
+ if (Status != other.Status) return false;
+ if (Message != other.Message) return false;
+ return Equals(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override int GetHashCode() {
+ int hash = 1;
+ if (Status != 0) hash ^= Status.GetHashCode();
+ if (Message.Length != 0) hash ^= Message.GetHashCode();
+ if (_unknownFields != null) {
+ hash ^= _unknownFields.GetHashCode();
+ }
+ return hash;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public override string ToString() {
+ return pb::JsonFormatter.ToDiagnosticString(this);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void WriteTo(pb::CodedOutputStream output) {
+ if (Status != 0) {
+ output.WriteRawTag(8);
+ output.WriteInt32(Status);
+ }
+ if (Message.Length != 0) {
+ output.WriteRawTag(18);
+ output.WriteString(Message);
+ }
+ if (_unknownFields != null) {
+ _unknownFields.WriteTo(output);
+ }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public int CalculateSize() {
+ int size = 0;
+ if (Status != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeInt32Size(Status);
+ }
+ if (Message.Length != 0) {
+ size += 1 + pb::CodedOutputStream.ComputeStringSize(Message);
+ }
+ if (_unknownFields != null) {
+ size += _unknownFields.CalculateSize();
+ }
+ return size;
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(HeaderProto other) {
+ if (other == null) {
+ return;
+ }
+ if (other.Status != 0) {
+ Status = other.Status;
+ }
+ if (other.Message.Length != 0) {
+ Message = other.Message;
+ }
+ _unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public void MergeFrom(pb::CodedInputStream input) {
+ uint tag;
+ while ((tag = input.ReadTag()) != 0) {
+ switch(tag) {
+ default:
+ _unknownFields = pb::UnknownFieldSet.MergeFieldFrom(_unknownFields, input);
+ break;
+ case 8: {
+ Status = input.ReadInt32();
+ break;
+ }
+ case 18: {
+ Message = input.ReadString();
+ break;
+ }
+ }
+ }
+ }
+
+ }
+
+ #endregion
+
+}
+
+#endregion Designer generated code
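The `_unknownFields` plumbing that recurs in every message above is what makes these types forward-compatible: fields sent by a peer built against a newer schema are retained on parse and re-emitted on write instead of being dropped. A sketch using `HeaderProto` (the hand-encoded bytes and the extra field number are ours):

```csharp
using System.IO;
using Google.Protobuf;
using Unity.MLAgents.CommunicatorObjects;

internal static class HeaderUnknownFieldsExample // illustrative helper only
{
    internal static void Run()
    {
        // Hand-encode a HeaderProto payload plus one varint field (number 15)
        // that this schema does not define: tags 8 = status, 18 = message,
        // 120 = (15 << 3) | 0, the unknown field.
        var buffer = new MemoryStream();
        var output = new CodedOutputStream(buffer);
        output.WriteRawTag(8);   output.WriteInt32(200);
        output.WriteRawTag(18);  output.WriteString("OK");
        output.WriteRawTag(120); output.WriteInt32(7); // unknown to HeaderProto
        output.Flush();

        var parsed = HeaderProto.Parser.ParseFrom(buffer.ToArray());

        // The unknown field survives re-serialization byte for byte.
        byte[] rewritten = parsed.ToByteArray();
        System.Diagnostics.Debug.Assert(rewritten.Length == buffer.ToArray().Length);
    }
}
```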
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Header.cs.meta b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Header.cs.meta
new file mode 100644
index 0000000000..3084742c95
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Header.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 870996bd75a1a4fbcbb120b1e1e66c37
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Observation.cs b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Observation.cs
new file mode 100644
index 0000000000..3e23c8d991
--- /dev/null
+++ b/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Observation.cs
@@ -0,0 +1,546 @@
+//
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: mlagents_envs/communicator_objects/observation.proto
+//
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Unity.MLAgents.CommunicatorObjects {
+
+ /// Holder for reflection information generated from mlagents_envs/communicator_objects/observation.proto
+ internal static partial class ObservationReflection {
+
+ #region Descriptor
+ /// File descriptor for mlagents_envs/communicator_objects/observation.proto
+ public static pbr::FileDescriptor Descriptor {
+ get { return descriptor; }
+ }
+ private static pbr::FileDescriptor descriptor;
+
+ static ObservationReflection() {
+ byte[] descriptorData = global::System.Convert.FromBase64String(
+ string.Concat(
+ "CjRtbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL29ic2VydmF0",
+ "aW9uLnByb3RvEhRjb21tdW5pY2F0b3Jfb2JqZWN0cyKPAwoQT2JzZXJ2YXRp",
+ "b25Qcm90bxINCgVzaGFwZRgBIAMoBRJEChBjb21wcmVzc2lvbl90eXBlGAIg",
+ "ASgOMiouY29tbXVuaWNhdG9yX29iamVjdHMuQ29tcHJlc3Npb25UeXBlUHJv",
+ "dG8SGQoPY29tcHJlc3NlZF9kYXRhGAMgASgMSAASRgoKZmxvYXRfZGF0YRgE",
+ "IAEoCzIwLmNvbW11bmljYXRvcl9vYmplY3RzLk9ic2VydmF0aW9uUHJvdG8u",
+ "RmxvYXREYXRhSAASIgoaY29tcHJlc3NlZF9jaGFubmVsX21hcHBpbmcYBSAD",
+ "KAUSHAoUZGltZW5zaW9uX3Byb3BlcnRpZXMYBiADKAUSRAoQb2JzZXJ2YXRp",
+ "b25fdHlwZRgHIAEoDjIqLmNvbW11bmljYXRvcl9vYmplY3RzLk9ic2VydmF0",
+ "aW9uVHlwZVByb3RvEgwKBG5hbWUYCCABKAkaGQoJRmxvYXREYXRhEgwKBGRh",
+ "dGEYASADKAJCEgoQb2JzZXJ2YXRpb25fZGF0YSopChRDb21wcmVzc2lvblR5",
+ "cGVQcm90bxIICgROT05FEAASBwoDUE5HEAEqQAoUT2JzZXJ2YXRpb25UeXBl",
+ "UHJvdG8SCwoHREVGQVVMVBAAEg8KC0dPQUxfU0lHTkFMEAEiBAgCEAIiBAgD",
+ "EANCJaoCIlVuaXR5Lk1MQWdlbnRzLkNvbW11bmljYXRvck9iamVjdHNiBnBy",
+ "b3RvMw=="));
+ descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+ new pbr::FileDescriptor[] { },
+ new pbr::GeneratedClrTypeInfo(new[] {typeof(global::Unity.MLAgents.CommunicatorObjects.CompressionTypeProto), typeof(global::Unity.MLAgents.CommunicatorObjects.ObservationTypeProto), }, new pbr::GeneratedClrTypeInfo[] {
+ new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.ObservationProto), global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Parser, new[]{ "Shape", "CompressionType", "CompressedData", "FloatData", "CompressedChannelMapping", "DimensionProperties", "ObservationType", "Name" }, new[]{ "ObservationData" }, null, new pbr::GeneratedClrTypeInfo[] { new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Types.FloatData), global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Types.FloatData.Parser, new[]{ "Data" }, null, null, null)})
+ }));
+ }
+ #endregion
+
+ }
+ #region Enums
+ internal enum CompressionTypeProto {
+ [pbr::OriginalName("NONE")] None = 0,
+ [pbr::OriginalName("PNG")] Png = 1,
+ }
+
+ internal enum ObservationTypeProto {
+ [pbr::OriginalName("DEFAULT")] Default = 0,
+ [pbr::OriginalName("GOAL_SIGNAL")] GoalSignal = 1,
+ }
+
+ #endregion
+
+ #region Messages
+ internal sealed partial class ObservationProto : pb::IMessage<ObservationProto> {
+ private static readonly pb::MessageParser<ObservationProto> _parser = new pb::MessageParser<ObservationProto>(() => new ObservationProto());
+ private pb::UnknownFieldSet _unknownFields;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pb::MessageParser<ObservationProto> Parser { get { return _parser; } }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public static pbr::MessageDescriptor Descriptor {
+ get { return global::Unity.MLAgents.CommunicatorObjects.ObservationReflection.Descriptor.MessageTypes[0]; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ pbr::MessageDescriptor pb::IMessage.Descriptor {
+ get { return Descriptor; }
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ObservationProto() {
+ OnConstruction();
+ }
+
+ partial void OnConstruction();
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ObservationProto(ObservationProto other) : this() {
+ shape_ = other.shape_.Clone();
+ compressionType_ = other.compressionType_;
+ compressedChannelMapping_ = other.compressedChannelMapping_.Clone();
+ dimensionProperties_ = other.dimensionProperties_.Clone();
+ observationType_ = other.observationType_;
+ name_ = other.name_;
+ switch (other.ObservationDataCase) {
+ case ObservationDataOneofCase.CompressedData:
+ CompressedData = other.CompressedData;
+ break;
+ case ObservationDataOneofCase.FloatData:
+ FloatData = other.FloatData.Clone();
+ break;
+ }
+
+ _unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
+ }
+
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public ObservationProto Clone() {
+ return new ObservationProto(this);
+ }
+
+ /// Field number for the "shape" field.
+ public const int ShapeFieldNumber = 1;
+ private static readonly pb::FieldCodec<int> _repeated_shape_codec
+ = pb::FieldCodec.ForInt32(10);
+ private readonly pbc::RepeatedField<int> shape_ = new pbc::RepeatedField<int>();
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public pbc::RepeatedField<int> Shape {
+ get { return shape_; }
+ }
+
+ /// Field number for the "compression_type" field.
+ public const int CompressionTypeFieldNumber = 2;
+ private global::Unity.MLAgents.CommunicatorObjects.CompressionTypeProto compressionType_ = 0;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public global::Unity.MLAgents.CommunicatorObjects.CompressionTypeProto CompressionType {
+ get { return compressionType_; }
+ set {
+ compressionType_ = value;
+ }
+ }
+
+ /// Field number for the "compressed_data" field.
+ public const int CompressedDataFieldNumber = 3;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public pb::ByteString CompressedData {
+ get { return observationDataCase_ == ObservationDataOneofCase.CompressedData ? (pb::ByteString) observationData_ : pb::ByteString.Empty; }
+ set {
+ observationData_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+ observationDataCase_ = ObservationDataOneofCase.CompressedData;
+ }
+ }
+
+ /// Field number for the "float_data" field.
+ public const int FloatDataFieldNumber = 4;
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Types.FloatData FloatData {
+ get { return observationDataCase_ == ObservationDataOneofCase.FloatData ? (global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Types.FloatData) observationData_ : null; }
+ set {
+ observationData_ = value;
+ observationDataCase_ = value == null ? ObservationDataOneofCase.None : ObservationDataOneofCase.FloatData;
+ }
+ }
+
+ /// Field number for the "compressed_channel_mapping" field.
+ public const int CompressedChannelMappingFieldNumber = 5;
+ private static readonly pb::FieldCodec<int> _repeated_compressedChannelMapping_codec
+ = pb::FieldCodec.ForInt32(42);
+ private readonly pbc::RepeatedField<int> compressedChannelMapping_ = new pbc::RepeatedField<int>();
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public pbc::RepeatedField<int> CompressedChannelMapping {
+ get { return compressedChannelMapping_; }
+ }
+
+ /// Field number for the "dimension_properties" field.
+ public const int DimensionPropertiesFieldNumber = 6;
+ private static readonly pb::FieldCodec<int> _repeated_dimensionProperties_codec
+ = pb::FieldCodec.ForInt32(50);
+ private readonly pbc::RepeatedField<int> dimensionProperties_ = new pbc::RepeatedField<int>();
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+ public pbc::RepeatedField<int>