From 53b3a961022c8629ed8ea0e69f274329ac5ac053 Mon Sep 17 00:00:00 2001 From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com> Date: Sun, 25 May 2025 19:26:15 +0000 Subject: [PATCH 1/2] Jules was unable to complete the task in time. Please review the work done so far and provide feedback for Jules to continue. --- .../templates/home/main_script.html | 2 +- poetry.lock | 1017 +++++++++++++++++ pyproject.toml | 52 + reverie/backend_server/global_methods.py | 160 ++- reverie/backend_server/maze.py | 206 ++-- .../persona/cognitive_modules/converse.py | 88 +- .../persona/cognitive_modules/execute.py | 102 +- .../persona/cognitive_modules/perceive.py | 106 +- .../persona/cognitive_modules/plan.py | 622 +++++----- .../persona/cognitive_modules/reflect.py | 107 +- .../persona/cognitive_modules/retrieve.py | 226 ++-- .../memory_structures/associative_memory.py | 51 +- .../persona/memory_structures/scratch.py | 293 +++-- .../memory_structures/spatial_memory.py | 61 +- reverie/backend_server/persona/persona.py | 224 ++-- reverie/backend_server/reverie.py | 959 ++++++++++------ reverie/backend_server/test_reverie.py | 519 +++++++++ 17 files changed, 3273 insertions(+), 1522 deletions(-) create mode 100644 poetry.lock create mode 100644 pyproject.toml create mode 100644 reverie/backend_server/test_reverie.py diff --git a/environment/frontend_server/templates/home/main_script.html b/environment/frontend_server/templates/home/main_script.html index 19c1dcf9f0..f44c2224fe 100644 --- a/environment/frontend_server/templates/home/main_script.html +++ b/environment/frontend_server/templates/home/main_script.html @@ -44,7 +44,7 @@ // Phaser 3.0 global settings. // Configuration meant to be passed to the main Phaser game instance. const config = { - type: Phaser.AUTO, + type: Phaser.CANVAS, width: 1500, height: 800, parent: "game-container", diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000000..5b9cb642a8 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,1017 @@ +# This file is automatically @generated by Poetry 2.1.0 and should not be changed by hand. + +[[package]] +name = "aiohttp" +version = "3.8.3" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "aiohttp-3.8.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ba71c9b4dcbb16212f334126cc3d8beb6af377f6703d9dc2d9fb3874fd667ee9"}, + {file = "aiohttp-3.8.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d24b8bb40d5c61ef2d9b6a8f4528c2f17f1c5d2d31fed62ec860f6006142e83e"}, + {file = "aiohttp-3.8.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f88df3a83cf9df566f171adba39d5bd52814ac0b94778d2448652fc77f9eb491"}, + {file = "aiohttp-3.8.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b97decbb3372d4b69e4d4c8117f44632551c692bb1361b356a02b97b69e18a62"}, + {file = "aiohttp-3.8.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:309aa21c1d54b8ef0723181d430347d7452daaff93e8e2363db8e75c72c2fb2d"}, + {file = "aiohttp-3.8.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad5383a67514e8e76906a06741febd9126fc7c7ff0f599d6fcce3e82b80d026f"}, + {file = "aiohttp-3.8.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20acae4f268317bb975671e375493dbdbc67cddb5f6c71eebdb85b34444ac46b"}, + {file = "aiohttp-3.8.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05a3c31c6d7cd08c149e50dc7aa2568317f5844acd745621983380597f027a18"}, + {file = "aiohttp-3.8.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d6f76310355e9fae637c3162936e9504b4767d5c52ca268331e2756e54fd4ca5"}, + {file = "aiohttp-3.8.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:256deb4b29fe5e47893fa32e1de2d73c3afe7407738bd3c63829874661d4822d"}, + {file = "aiohttp-3.8.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:5c59fcd80b9049b49acd29bd3598cada4afc8d8d69bd4160cd613246912535d7"}, + {file = "aiohttp-3.8.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:059a91e88f2c00fe40aed9031b3606c3f311414f86a90d696dd982e7aec48142"}, + {file = "aiohttp-3.8.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2feebbb6074cdbd1ac276dbd737b40e890a1361b3cc30b74ac2f5e24aab41f7b"}, + {file = "aiohttp-3.8.3-cp310-cp310-win32.whl", hash = "sha256:5bf651afd22d5f0c4be16cf39d0482ea494f5c88f03e75e5fef3a85177fecdeb"}, + {file = "aiohttp-3.8.3-cp310-cp310-win_amd64.whl", hash = "sha256:653acc3880459f82a65e27bd6526e47ddf19e643457d36a2250b85b41a564715"}, + {file = "aiohttp-3.8.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:86fc24e58ecb32aee09f864cb11bb91bc4c1086615001647dbfc4dc8c32f4008"}, + {file = "aiohttp-3.8.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75e14eac916f024305db517e00a9252714fce0abcb10ad327fb6dcdc0d060f1d"}, + {file = "aiohttp-3.8.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d1fde0f44029e02d02d3993ad55ce93ead9bb9b15c6b7ccd580f90bd7e3de476"}, + {file = "aiohttp-3.8.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ab94426ddb1ecc6a0b601d832d5d9d421820989b8caa929114811369673235c"}, + {file = "aiohttp-3.8.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:89d2e02167fa95172c017732ed7725bc8523c598757f08d13c5acca308e1a061"}, + {file = "aiohttp-3.8.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:02f9a2c72fc95d59b881cf38a4b2be9381b9527f9d328771e90f72ac76f31ad8"}, + {file = "aiohttp-3.8.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c7149272fb5834fc186328e2c1fa01dda3e1fa940ce18fded6d412e8f2cf76d"}, + {file = "aiohttp-3.8.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:512bd5ab136b8dc0ffe3fdf2dfb0c4b4f49c8577f6cae55dca862cd37a4564e2"}, + {file = "aiohttp-3.8.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7018ecc5fe97027214556afbc7c502fbd718d0740e87eb1217b17efd05b3d276"}, + {file = "aiohttp-3.8.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:88c70ed9da9963d5496d38320160e8eb7e5f1886f9290475a881db12f351ab5d"}, + {file = "aiohttp-3.8.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:da22885266bbfb3f78218dc40205fed2671909fbd0720aedba39b4515c038091"}, + {file = "aiohttp-3.8.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:e65bc19919c910127c06759a63747ebe14f386cda573d95bcc62b427ca1afc73"}, + {file = "aiohttp-3.8.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:08c78317e950e0762c2983f4dd58dc5e6c9ff75c8a0efeae299d363d439c8e34"}, + {file = "aiohttp-3.8.3-cp311-cp311-win32.whl", hash = "sha256:45d88b016c849d74ebc6f2b6e8bc17cabf26e7e40c0661ddd8fae4c00f015697"}, + {file = "aiohttp-3.8.3-cp311-cp311-win_amd64.whl", hash = "sha256:96372fc29471646b9b106ee918c8eeb4cca423fcbf9a34daa1b93767a88a2290"}, + {file = "aiohttp-3.8.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c971bf3786b5fad82ce5ad570dc6ee420f5b12527157929e830f51c55dc8af77"}, + {file = "aiohttp-3.8.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff25f48fc8e623d95eca0670b8cc1469a83783c924a602e0fbd47363bb54aaca"}, + {file = "aiohttp-3.8.3-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e381581b37db1db7597b62a2e6b8b57c3deec95d93b6d6407c5b61ddc98aca6d"}, + {file = "aiohttp-3.8.3-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:db19d60d846283ee275d0416e2a23493f4e6b6028825b51290ac05afc87a6f97"}, + {file = "aiohttp-3.8.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25892c92bee6d9449ffac82c2fe257f3a6f297792cdb18ad784737d61e7a9a85"}, + {file = "aiohttp-3.8.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:398701865e7a9565d49189f6c90868efaca21be65c725fc87fc305906be915da"}, + {file = "aiohttp-3.8.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4a4fbc769ea9b6bd97f4ad0b430a6807f92f0e5eb020f1e42ece59f3ecfc4585"}, + {file = "aiohttp-3.8.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:b29bfd650ed8e148f9c515474a6ef0ba1090b7a8faeee26b74a8ff3b33617502"}, + {file = "aiohttp-3.8.3-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:1e56b9cafcd6531bab5d9b2e890bb4937f4165109fe98e2b98ef0dcfcb06ee9d"}, + {file = "aiohttp-3.8.3-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:ec40170327d4a404b0d91855d41bfe1fe4b699222b2b93e3d833a27330a87a6d"}, + {file = "aiohttp-3.8.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:2df5f139233060578d8c2c975128fb231a89ca0a462b35d4b5fcf7c501ebdbe1"}, + {file = "aiohttp-3.8.3-cp36-cp36m-win32.whl", hash = "sha256:f973157ffeab5459eefe7b97a804987876dd0a55570b8fa56b4e1954bf11329b"}, + {file = "aiohttp-3.8.3-cp36-cp36m-win_amd64.whl", hash = "sha256:437399385f2abcd634865705bdc180c8314124b98299d54fe1d4c8990f2f9494"}, + {file = "aiohttp-3.8.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:09e28f572b21642128ef31f4e8372adb6888846f32fecb288c8b0457597ba61a"}, + {file = "aiohttp-3.8.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f3553510abdbec67c043ca85727396ceed1272eef029b050677046d3387be8d"}, + {file = "aiohttp-3.8.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e168a7560b7c61342ae0412997b069753f27ac4862ec7867eff74f0fe4ea2ad9"}, + {file = "aiohttp-3.8.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:db4c979b0b3e0fa7e9e69ecd11b2b3174c6963cebadeecfb7ad24532ffcdd11a"}, + {file = "aiohttp-3.8.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e164e0a98e92d06da343d17d4e9c4da4654f4a4588a20d6c73548a29f176abe2"}, + {file = "aiohttp-3.8.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8a78079d9a39ca9ca99a8b0ac2fdc0c4d25fc80c8a8a82e5c8211509c523363"}, + {file = "aiohttp-3.8.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:21b30885a63c3f4ff5b77a5d6caf008b037cb521a5f33eab445dc566f6d092cc"}, + {file = "aiohttp-3.8.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4b0f30372cef3fdc262f33d06e7b411cd59058ce9174ef159ad938c4a34a89da"}, + {file = "aiohttp-3.8.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:8135fa153a20d82ffb64f70a1b5c2738684afa197839b34cc3e3c72fa88d302c"}, + {file = "aiohttp-3.8.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:ad61a9639792fd790523ba072c0555cd6be5a0baf03a49a5dd8cfcf20d56df48"}, + {file = "aiohttp-3.8.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:978b046ca728073070e9abc074b6299ebf3501e8dee5e26efacb13cec2b2dea0"}, + {file = "aiohttp-3.8.3-cp37-cp37m-win32.whl", hash = "sha256:0d2c6d8c6872df4a6ec37d2ede71eff62395b9e337b4e18efd2177de883a5033"}, + {file = "aiohttp-3.8.3-cp37-cp37m-win_amd64.whl", hash = "sha256:21d69797eb951f155026651f7e9362877334508d39c2fc37bd04ff55b2007091"}, + {file = "aiohttp-3.8.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ca9af5f8f5812d475c5259393f52d712f6d5f0d7fdad9acdb1107dd9e3cb7eb"}, + {file = "aiohttp-3.8.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d90043c1882067f1bd26196d5d2db9aa6d268def3293ed5fb317e13c9413ea4"}, + {file = "aiohttp-3.8.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d737fc67b9a970f3234754974531dc9afeea11c70791dcb7db53b0cf81b79784"}, + {file = "aiohttp-3.8.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebf909ea0a3fc9596e40d55d8000702a85e27fd578ff41a5500f68f20fd32e6c"}, + {file = "aiohttp-3.8.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5835f258ca9f7c455493a57ee707b76d2d9634d84d5d7f62e77be984ea80b849"}, + {file = "aiohttp-3.8.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da37dcfbf4b7f45d80ee386a5f81122501ec75672f475da34784196690762f4b"}, + {file = "aiohttp-3.8.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87f44875f2804bc0511a69ce44a9595d5944837a62caecc8490bbdb0e18b1342"}, + {file = "aiohttp-3.8.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:527b3b87b24844ea7865284aabfab08eb0faf599b385b03c2aa91fc6edd6e4b6"}, + {file = "aiohttp-3.8.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5ba88df9aa5e2f806650fcbeedbe4f6e8736e92fc0e73b0400538fd25a4dd96"}, + {file = "aiohttp-3.8.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e7b8813be97cab8cb52b1375f41f8e6804f6507fe4660152e8ca5c48f0436017"}, + {file = "aiohttp-3.8.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:2dea10edfa1a54098703cb7acaa665c07b4e7568472a47f4e64e6319d3821ccf"}, + {file = "aiohttp-3.8.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:713d22cd9643ba9025d33c4af43943c7a1eb8547729228de18d3e02e278472b6"}, + {file = "aiohttp-3.8.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2d252771fc85e0cf8da0b823157962d70639e63cb9b578b1dec9868dd1f4f937"}, + {file = "aiohttp-3.8.3-cp38-cp38-win32.whl", hash = "sha256:66bd5f950344fb2b3dbdd421aaa4e84f4411a1a13fca3aeb2bcbe667f80c9f76"}, + {file = "aiohttp-3.8.3-cp38-cp38-win_amd64.whl", hash = "sha256:84b14f36e85295fe69c6b9789b51a0903b774046d5f7df538176516c3e422446"}, + {file = "aiohttp-3.8.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16c121ba0b1ec2b44b73e3a8a171c4f999b33929cd2397124a8c7fcfc8cd9e06"}, + {file = "aiohttp-3.8.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8d6aaa4e7155afaf994d7924eb290abbe81a6905b303d8cb61310a2aba1c68ba"}, + {file = "aiohttp-3.8.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:43046a319664a04b146f81b40e1545d4c8ac7b7dd04c47e40bf09f65f2437346"}, + {file = "aiohttp-3.8.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599418aaaf88a6d02a8c515e656f6faf3d10618d3dd95866eb4436520096c84b"}, + {file = "aiohttp-3.8.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92a2964319d359f494f16011e23434f6f8ef0434acd3cf154a6b7bec511e2fb7"}, + {file = "aiohttp-3.8.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73a4131962e6d91109bca6536416aa067cf6c4efb871975df734f8d2fd821b37"}, + {file = "aiohttp-3.8.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:598adde339d2cf7d67beaccda3f2ce7c57b3b412702f29c946708f69cf8222aa"}, + {file = "aiohttp-3.8.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:75880ed07be39beff1881d81e4a907cafb802f306efd6d2d15f2b3c69935f6fb"}, + {file = "aiohttp-3.8.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0239da9fbafd9ff82fd67c16704a7d1bccf0d107a300e790587ad05547681c8"}, + {file = "aiohttp-3.8.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4e3a23ec214e95c9fe85a58470b660efe6534b83e6cbe38b3ed52b053d7cb6ad"}, + {file = "aiohttp-3.8.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:47841407cc89a4b80b0c52276f3cc8138bbbfba4b179ee3acbd7d77ae33f7ac4"}, + {file = "aiohttp-3.8.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:54d107c89a3ebcd13228278d68f1436d3f33f2dd2af5415e3feaeb1156e1a62c"}, + {file = "aiohttp-3.8.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c37c5cce780349d4d51739ae682dec63573847a2a8dcb44381b174c3d9c8d403"}, + {file = "aiohttp-3.8.3-cp39-cp39-win32.whl", hash = "sha256:f178d2aadf0166be4df834c4953da2d7eef24719e8aec9a65289483eeea9d618"}, + {file = "aiohttp-3.8.3-cp39-cp39-win_amd64.whl", hash = "sha256:88e5be56c231981428f4f506c68b6a46fa25c4123a2e86d156c58a8369d31ab7"}, + {file = "aiohttp-3.8.3.tar.gz", hash = "sha256:3828fb41b7203176b82fe5d699e0d845435f2374750a44b480ea6b930f6be269"}, +] + +[package.dependencies] +aiosignal = ">=1.1.2" +async-timeout = ">=4.0.0a3,<5.0" +attrs = ">=17.3.0" +charset-normalizer = ">=2.0,<3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns", "cchardet ; python_version < \"3.10\""] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "asgiref" +version = "3.5.2" +description = "ASGI specs, helper code, and adapters" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "asgiref-3.5.2-py3-none-any.whl", hash = "sha256:1d2880b792ae8757289136f1db2b7b99100ce959b2aa57fd69dab783d05afac4"}, + {file = "asgiref-3.5.2.tar.gz", hash = "sha256:4a29362a6acebe09bf1d6640db38c1dc3d9217c68e6f9f6204d72667fc19a424"}, +] + +[package.extras] +tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] + +[[package]] +name = "async-generator" +version = "1.10" +description = "Async generators and context managers for Python 3.5+" +optional = false +python-versions = ">=3.5" +groups = ["main"] +files = [ + {file = "async_generator-1.10-py3-none-any.whl", hash = "sha256:01c7bf666359b4967d2cda0000cc2e4af16a0ae098cbffcb8472fb9e8ad6585b"}, + {file = "async_generator-1.10.tar.gz", hash = "sha256:6ebb3d106c12920aaae42ccb6f787ef5eefdcdd166ea3d628fa8476abe712144"}, +] + +[[package]] +name = "async-timeout" +version = "4.0.2" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"}, + {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, +] + +[[package]] +name = "attrs" +version = "22.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, + {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"] +tests = ["attrs[tests-no-zope]", "zope.interface"] +tests-no-zope = ["cloudpickle ; platform_python_implementation == \"CPython\"", "cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990) ; platform_python_implementation == \"CPython\"", "mypy (>=0.971,<0.990) ; platform_python_implementation == \"CPython\"", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version < \"3.11\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version < \"3.11\"", "pytest-xdist[psutil]", "pytest-xdist[psutil]"] + +[[package]] +name = "boto" +version = "2.49.0" +description = "Amazon Web Services Library" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "boto-2.49.0-py2.py3-none-any.whl", hash = "sha256:147758d41ae7240dc989f0039f27da8ca0d53734be0eb869ef16e3adcfa462e8"}, + {file = "boto-2.49.0.tar.gz", hash = "sha256:ea0d3b40a2d852767be77ca343b58a9e3a4b00d9db440efb8da74b4e58025e5a"}, +] + +[[package]] +name = "botocore" +version = "1.29.43" +description = "Low-level, data-driven core of boto 3." +optional = false +python-versions = ">= 3.7" +groups = ["main"] +files = [ + {file = "botocore-1.29.43-py3-none-any.whl", hash = "sha256:dc60385c56b960aa75ef05cdcf808e6c07ad04b1b392d1abd6fc405a16d85826"}, + {file = "botocore-1.29.43.tar.gz", hash = "sha256:a801e40f5f14c1b2fb3c0b2c438b546f07805d53d57d8dc135ebce4fdce901bd"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = ">=1.25.4,<1.27" + +[package.extras] +crt = ["awscrt (==0.15.3)"] + +[[package]] +name = "certifi" +version = "2021.10.8" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"}, + {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"}, +] + +[[package]] +name = "charset-normalizer" +version = "2.0.12" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.5.0" +groups = ["main"] +files = [ + {file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"}, + {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"}, +] + +[package.extras] +unicode-backport = ["unicodedata2"] + +[[package]] +name = "click" +version = "8.0.3" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"}, + {file = "click-8.0.3.tar.gz", hash = "sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main"] +markers = "platform_system == \"Windows\"" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "cycler" +version = "0.11.0" +description = "Composable style cycles" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"}, + {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"}, +] + +[[package]] +name = "dj-database-url" +version = "0.5.0" +description = "Use Database URLs in your Django Application." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "dj-database-url-0.5.0.tar.gz", hash = "sha256:4aeaeb1f573c74835b0686a2b46b85990571159ffc21aa57ecd4d1e1cb334163"}, + {file = "dj_database_url-0.5.0-py2.py3-none-any.whl", hash = "sha256:851785365761ebe4994a921b433062309eb882fedd318e1b0fcecc607ed02da9"}, +] + +[[package]] +name = "django" +version = "2.2" +description = "A high-level Python Web framework that encourages rapid development and clean, pragmatic design." +optional = false +python-versions = ">=3.5" +groups = ["main"] +files = [ + {file = "Django-2.2-py3-none-any.whl", hash = "sha256:a2814bffd1f007805b19194eb0b9a331933b82bd5da1c3ba3d7b7ba16e06dc4b"}, + {file = "Django-2.2.tar.gz", hash = "sha256:7c3543e4fb070d14e10926189a7fcf42ba919263b7473dceaefce34d54e8a119"}, +] + +[package.dependencies] +pytz = "*" +sqlparse = "*" + +[package.extras] +argon2 = ["argon2-cffi (>=16.1.0)"] +bcrypt = ["bcrypt"] + +[[package]] +name = "exceptiongroup" +version = "1.1.0" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "exceptiongroup-1.1.0-py3-none-any.whl", hash = "sha256:327cbda3da756e2de031a3107b81ab7b3770a602c4d16ca618298c526f4bec1e"}, + {file = "exceptiongroup-1.1.0.tar.gz", hash = "sha256:bcb67d800a4497e1b404c2dd44fca47d3b7a5e5433dbab67f96c1a685cdfdf23"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "frozenlist" +version = "1.3.3" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff8bf625fe85e119553b5383ba0fb6aa3d0ec2ae980295aaefa552374926b3f4"}, + {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dfbac4c2dfcc082fcf8d942d1e49b6aa0766c19d3358bd86e2000bf0fa4a9cf0"}, + {file = "frozenlist-1.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b1c63e8d377d039ac769cd0926558bb7068a1f7abb0f003e3717ee003ad85530"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fdfc24dcfce5b48109867c13b4cb15e4660e7bd7661741a391f821f23dfdca7"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c926450857408e42f0bbc295e84395722ce74bae69a3b2aa2a65fe22cb14b99"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1841e200fdafc3d51f974d9d377c079a0694a8f06de2e67b48150328d66d5483"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f470c92737afa7d4c3aacc001e335062d582053d4dbe73cda126f2d7031068dd"}, + {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:783263a4eaad7c49983fe4b2e7b53fa9770c136c270d2d4bbb6d2192bf4d9caf"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:924620eef691990dfb56dc4709f280f40baee568c794b5c1885800c3ecc69816"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ae4dc05c465a08a866b7a1baf360747078b362e6a6dbeb0c57f234db0ef88ae0"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bed331fe18f58d844d39ceb398b77d6ac0b010d571cba8267c2e7165806b00ce"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:02c9ac843e3390826a265e331105efeab489ffaf4dd86384595ee8ce6d35ae7f"}, + {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9545a33965d0d377b0bc823dcabf26980e77f1b6a7caa368a365a9497fb09420"}, + {file = "frozenlist-1.3.3-cp310-cp310-win32.whl", hash = "sha256:d5cd3ab21acbdb414bb6c31958d7b06b85eeb40f66463c264a9b343a4e238642"}, + {file = "frozenlist-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:b756072364347cb6aa5b60f9bc18e94b2f79632de3b0190253ad770c5df17db1"}, + {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b4395e2f8d83fbe0c627b2b696acce67868793d7d9750e90e39592b3626691b7"}, + {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14143ae966a6229350021384870458e4777d1eae4c28d1a7aa47f24d030e6678"}, + {file = "frozenlist-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5d8860749e813a6f65bad8285a0520607c9500caa23fea6ee407e63debcdbef6"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23d16d9f477bb55b6154654e0e74557040575d9d19fe78a161bd33d7d76808e8"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb82dbba47a8318e75f679690190c10a5e1f447fbf9df41cbc4c3afd726d88cb"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9309869032abb23d196cb4e4db574232abe8b8be1339026f489eeb34a4acfd91"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a97b4fe50b5890d36300820abd305694cb865ddb7885049587a5678215782a6b"}, + {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c188512b43542b1e91cadc3c6c915a82a5eb95929134faf7fd109f14f9892ce4"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:303e04d422e9b911a09ad499b0368dc551e8c3cd15293c99160c7f1f07b59a48"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0771aed7f596c7d73444c847a1c16288937ef988dc04fb9f7be4b2aa91db609d"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:66080ec69883597e4d026f2f71a231a1ee9887835902dbe6b6467d5a89216cf6"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:41fe21dc74ad3a779c3d73a2786bdf622ea81234bdd4faf90b8b03cad0c2c0b4"}, + {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f20380df709d91525e4bee04746ba612a4df0972c1b8f8e1e8af997e678c7b81"}, + {file = "frozenlist-1.3.3-cp311-cp311-win32.whl", hash = "sha256:f30f1928162e189091cf4d9da2eac617bfe78ef907a761614ff577ef4edfb3c8"}, + {file = "frozenlist-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a6394d7dadd3cfe3f4b3b186e54d5d8504d44f2d58dcc89d693698e8b7132b32"}, + {file = "frozenlist-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8df3de3a9ab8325f94f646609a66cbeeede263910c5c0de0101079ad541af332"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0693c609e9742c66ba4870bcee1ad5ff35462d5ffec18710b4ac89337ff16e27"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd4210baef299717db0a600d7a3cac81d46ef0e007f88c9335db79f8979c0d3d"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:394c9c242113bfb4b9aa36e2b80a05ffa163a30691c7b5a29eba82e937895d5e"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6327eb8e419f7d9c38f333cde41b9ae348bec26d840927332f17e887a8dcb70d"}, + {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e24900aa13212e75e5b366cb9065e78bbf3893d4baab6052d1aca10d46d944c"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3843f84a6c465a36559161e6c59dce2f2ac10943040c2fd021cfb70d58c4ad56"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:84610c1502b2461255b4c9b7d5e9c48052601a8957cd0aea6ec7a7a1e1fb9420"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:c21b9aa40e08e4f63a2f92ff3748e6b6c84d717d033c7b3438dd3123ee18f70e"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:efce6ae830831ab6a22b9b4091d411698145cb9b8fc869e1397ccf4b4b6455cb"}, + {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:40de71985e9042ca00b7953c4f41eabc3dc514a2d1ff534027f091bc74416401"}, + {file = "frozenlist-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:180c00c66bde6146a860cbb81b54ee0df350d2daf13ca85b275123bbf85de18a"}, + {file = "frozenlist-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9bbbcedd75acdfecf2159663b87f1bb5cfc80e7cd99f7ddd9d66eb98b14a8411"}, + {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:034a5c08d36649591be1cbb10e09da9f531034acfe29275fc5454a3b101ce41a"}, + {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba64dc2b3b7b158c6660d49cdb1d872d1d0bf4e42043ad8d5006099479a194e5"}, + {file = "frozenlist-1.3.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47df36a9fe24054b950bbc2db630d508cca3aa27ed0566c0baf661225e52c18e"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:008a054b75d77c995ea26629ab3a0c0d7281341f2fa7e1e85fa6153ae29ae99c"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:841ea19b43d438a80b4de62ac6ab21cfe6827bb8a9dc62b896acc88eaf9cecba"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e235688f42b36be2b6b06fc37ac2126a73b75fb8d6bc66dd632aa35286238703"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca713d4af15bae6e5d79b15c10c8522859a9a89d3b361a50b817c98c2fb402a2"}, + {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ac5995f2b408017b0be26d4a1d7c61bce106ff3d9e3324374d66b5964325448"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4ae8135b11652b08a8baf07631d3ebfe65a4c87909dbef5fa0cdde440444ee4"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4ea42116ceb6bb16dbb7d526e242cb6747b08b7710d9782aa3d6732bd8d27649"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:810860bb4bdce7557bc0febb84bbd88198b9dbc2022d8eebe5b3590b2ad6c842"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ee78feb9d293c323b59a6f2dd441b63339a30edf35abcb51187d2fc26e696d13"}, + {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0af2e7c87d35b38732e810befb9d797a99279cbb85374d42ea61c1e9d23094b3"}, + {file = "frozenlist-1.3.3-cp38-cp38-win32.whl", hash = "sha256:899c5e1928eec13fd6f6d8dc51be23f0d09c5281e40d9cf4273d188d9feeaf9b"}, + {file = "frozenlist-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:7f44e24fa70f6fbc74aeec3e971f60a14dde85da364aa87f15d1be94ae75aeef"}, + {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2b07ae0c1edaa0a36339ec6cce700f51b14a3fc6545fdd32930d2c83917332cf"}, + {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ebb86518203e12e96af765ee89034a1dbb0c3c65052d1b0c19bbbd6af8a145e1"}, + {file = "frozenlist-1.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5cf820485f1b4c91e0417ea0afd41ce5cf5965011b3c22c400f6d144296ccbc0"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c11e43016b9024240212d2a65043b70ed8dfd3b52678a1271972702d990ac6d"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa3c6e3305aa1146b59a09b32b2e04074945ffcfb2f0931836d103a2c38f936"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:352bd4c8c72d508778cf05ab491f6ef36149f4d0cb3c56b1b4302852255d05d5"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65a5e4d3aa679610ac6e3569e865425b23b372277f89b5ef06cf2cdaf1ebf22b"}, + {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e2c1185858d7e10ff045c496bbf90ae752c28b365fef2c09cf0fa309291669"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f163d2fd041c630fed01bc48d28c3ed4a3b003c00acd396900e11ee5316b56bb"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:05cdb16d09a0832eedf770cb7bd1fe57d8cf4eaf5aced29c4e41e3f20b30a784"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8bae29d60768bfa8fb92244b74502b18fae55a80eac13c88eb0b496d4268fd2d"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eedab4c310c0299961ac285591acd53dc6723a1ebd90a57207c71f6e0c2153ab"}, + {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3bbdf44855ed8f0fbcd102ef05ec3012d6a4fd7c7562403f76ce6a52aeffb2b1"}, + {file = "frozenlist-1.3.3-cp39-cp39-win32.whl", hash = "sha256:efa568b885bca461f7c7b9e032655c0c143d305bf01c30caf6db2854a4532b38"}, + {file = "frozenlist-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfe33efc9cb900a4c46f91a5ceba26d6df370ffddd9ca386eb1d4f0ad97b9ea9"}, + {file = "frozenlist-1.3.3.tar.gz", hash = "sha256:58bcc55721e8a90b88332d6cd441261ebb22342e238296bb330968952fbb3a6a"}, +] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "idna" +version = "3.3" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +groups = ["main"] +files = [ + {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, + {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "multidict" +version = "6.0.4" +description = "multidict implementation" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, + {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, + {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, + {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, + {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, + {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, + {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, + {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, + {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, + {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, + {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, + {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, + {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, +] + +[[package]] +name = "numpy" +version = "1.25.2" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "numpy-1.25.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:db3ccc4e37a6873045580d413fe79b68e47a681af8db2e046f1dacfa11f86eb3"}, + {file = "numpy-1.25.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:90319e4f002795ccfc9050110bbbaa16c944b1c37c0baeea43c5fb881693ae1f"}, + {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfe4a913e29b418d096e696ddd422d8a5d13ffba4ea91f9f60440a3b759b0187"}, + {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f08f2e037bba04e707eebf4bc934f1972a315c883a9e0ebfa8a7756eabf9e357"}, + {file = "numpy-1.25.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bec1e7213c7cb00d67093247f8c4db156fd03075f49876957dca4711306d39c9"}, + {file = "numpy-1.25.2-cp310-cp310-win32.whl", hash = "sha256:7dc869c0c75988e1c693d0e2d5b26034644399dd929bc049db55395b1379e044"}, + {file = "numpy-1.25.2-cp310-cp310-win_amd64.whl", hash = "sha256:834b386f2b8210dca38c71a6e0f4fd6922f7d3fcff935dbe3a570945acb1b545"}, + {file = "numpy-1.25.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5462d19336db4560041517dbb7759c21d181a67cb01b36ca109b2ae37d32418"}, + {file = "numpy-1.25.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c5652ea24d33585ea39eb6a6a15dac87a1206a692719ff45d53c5282e66d4a8f"}, + {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d60fbae8e0019865fc4784745814cff1c421df5afee233db6d88ab4f14655a2"}, + {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e7f0f7f6d0eee8364b9a6304c2845b9c491ac706048c7e8cf47b83123b8dbf"}, + {file = "numpy-1.25.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bb33d5a1cf360304754913a350edda36d5b8c5331a8237268c48f91253c3a364"}, + {file = "numpy-1.25.2-cp311-cp311-win32.whl", hash = "sha256:5883c06bb92f2e6c8181df7b39971a5fb436288db58b5a1c3967702d4278691d"}, + {file = "numpy-1.25.2-cp311-cp311-win_amd64.whl", hash = "sha256:5c97325a0ba6f9d041feb9390924614b60b99209a71a69c876f71052521d42a4"}, + {file = "numpy-1.25.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b79e513d7aac42ae918db3ad1341a015488530d0bb2a6abcbdd10a3a829ccfd3"}, + {file = "numpy-1.25.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb942bfb6f84df5ce05dbf4b46673ffed0d3da59f13635ea9b926af3deb76926"}, + {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e0746410e73384e70d286f93abf2520035250aad8c5714240b0492a7302fdca"}, + {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7806500e4f5bdd04095e849265e55de20d8cc4b661b038957354327f6d9b295"}, + {file = "numpy-1.25.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8b77775f4b7df768967a7c8b3567e309f617dd5e99aeb886fa14dc1a0791141f"}, + {file = "numpy-1.25.2-cp39-cp39-win32.whl", hash = "sha256:2792d23d62ec51e50ce4d4b7d73de8f67a2fd3ea710dcbc8563a51a03fb07b01"}, + {file = "numpy-1.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:76b4115d42a7dfc5d485d358728cdd8719be33cc5ec6ec08632a5d6fca2ed380"}, + {file = "numpy-1.25.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1a1329e26f46230bf77b02cc19e900db9b52f398d6722ca853349a782d4cff55"}, + {file = "numpy-1.25.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3abc71e8b6edba80a01a52e66d83c5d14433cbcd26a40c329ec7ed09f37901"}, + {file = "numpy-1.25.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1b9735c27cea5d995496f46a8b1cd7b408b3f34b6d50459d9ac8fe3a20cc17bf"}, + {file = "numpy-1.25.2.tar.gz", hash = "sha256:fd608e19c8d7c55021dffd43bfe5492fab8cc105cc8986f813f8c3c048b38760"}, +] + +[[package]] +name = "openai" +version = "0.27.0" +description = "Python client library for the OpenAI API" +optional = false +python-versions = ">=3.7.1" +groups = ["main"] +files = [ + {file = "openai-0.27.0-py3-none-any.whl", hash = "sha256:7d4066ea529658a1f1696a8766912b3f3b7139faca860dc530bf102f0b19d42c"}, + {file = "openai-0.27.0.tar.gz", hash = "sha256:0efa486016283607cb72c23eac346ac739a22613ff4bff101df72a138cb2de60"}, +] + +[package.dependencies] +aiohttp = "*" +requests = ">=2.20" +tqdm = "*" + +[package.extras] +datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] +dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", "pytest-asyncio", "pytest-mock"] +embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"] +wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"] + +[[package]] +name = "packaging" +version = "23.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, + {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, +] + +[[package]] +name = "pillow" +version = "8.4.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "Pillow-8.4.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:81f8d5c81e483a9442d72d182e1fb6dcb9723f289a57e8030811bac9ea3fef8d"}, + {file = "Pillow-8.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3f97cfb1e5a392d75dd8b9fd274d205404729923840ca94ca45a0af57e13dbe6"}, + {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb9fc393f3c61f9054e1ed26e6fe912c7321af2f41ff49d3f83d05bacf22cc78"}, + {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d82cdb63100ef5eedb8391732375e6d05993b765f72cb34311fab92103314649"}, + {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62cc1afda735a8d109007164714e73771b499768b9bb5afcbbee9d0ff374b43f"}, + {file = "Pillow-8.4.0-cp310-cp310-win32.whl", hash = "sha256:e3dacecfbeec9a33e932f00c6cd7996e62f53ad46fbe677577394aaa90ee419a"}, + {file = "Pillow-8.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:620582db2a85b2df5f8a82ddeb52116560d7e5e6b055095f04ad828d1b0baa39"}, + {file = "Pillow-8.4.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:1bc723b434fbc4ab50bb68e11e93ce5fb69866ad621e3c2c9bdb0cd70e345f55"}, + {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cbcfd54df6caf85cc35264c77ede902452d6df41166010262374155947460c"}, + {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70ad9e5c6cb9b8487280a02c0ad8a51581dcbbe8484ce058477692a27c151c0a"}, + {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25a49dc2e2f74e65efaa32b153527fc5ac98508d502fa46e74fa4fd678ed6645"}, + {file = "Pillow-8.4.0-cp36-cp36m-win32.whl", hash = "sha256:93ce9e955cc95959df98505e4608ad98281fff037350d8c2671c9aa86bcf10a9"}, + {file = "Pillow-8.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2e4440b8f00f504ee4b53fe30f4e381aae30b0568193be305256b1462216feff"}, + {file = "Pillow-8.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8c803ac3c28bbc53763e6825746f05cc407b20e4a69d0122e526a582e3b5e153"}, + {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a17b5d948f4ceeceb66384727dde11b240736fddeda54ca740b9b8b1556b29"}, + {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1394a6ad5abc838c5cd8a92c5a07535648cdf6d09e8e2d6df916dfa9ea86ead8"}, + {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:792e5c12376594bfcb986ebf3855aa4b7c225754e9a9521298e460e92fb4a488"}, + {file = "Pillow-8.4.0-cp37-cp37m-win32.whl", hash = "sha256:d99ec152570e4196772e7a8e4ba5320d2d27bf22fdf11743dd882936ed64305b"}, + {file = "Pillow-8.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7b7017b61bbcdd7f6363aeceb881e23c46583739cb69a3ab39cb384f6ec82e5b"}, + {file = "Pillow-8.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:d89363f02658e253dbd171f7c3716a5d340a24ee82d38aab9183f7fdf0cdca49"}, + {file = "Pillow-8.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0a0956fdc5defc34462bb1c765ee88d933239f9a94bc37d132004775241a7585"}, + {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b7bb9de00197fb4261825c15551adf7605cf14a80badf1761d61e59da347779"}, + {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72b9e656e340447f827885b8d7a15fc8c4e68d410dc2297ef6787eec0f0ea409"}, + {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5a4532a12314149d8b4e4ad8ff09dde7427731fcfa5917ff16d0291f13609df"}, + {file = "Pillow-8.4.0-cp38-cp38-win32.whl", hash = "sha256:82aafa8d5eb68c8463b6e9baeb4f19043bb31fefc03eb7b216b51e6a9981ae09"}, + {file = "Pillow-8.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:066f3999cb3b070a95c3652712cffa1a748cd02d60ad7b4e485c3748a04d9d76"}, + {file = "Pillow-8.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:5503c86916d27c2e101b7f71c2ae2cddba01a2cf55b8395b0255fd33fa4d1f1a"}, + {file = "Pillow-8.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4acc0985ddf39d1bc969a9220b51d94ed51695d455c228d8ac29fcdb25810e6e"}, + {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b052a619a8bfcf26bd8b3f48f45283f9e977890263e4571f2393ed8898d331b"}, + {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:493cb4e415f44cd601fcec11c99836f707bb714ab03f5ed46ac25713baf0ff20"}, + {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8831cb7332eda5dc89b21a7bce7ef6ad305548820595033a4b03cf3091235ed"}, + {file = "Pillow-8.4.0-cp39-cp39-win32.whl", hash = "sha256:5e9ac5f66616b87d4da618a20ab0a38324dbe88d8a39b55be8964eb520021e02"}, + {file = "Pillow-8.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:3eb1ce5f65908556c2d8685a8f0a6e989d887ec4057326f6c22b24e8a172c66b"}, + {file = "Pillow-8.4.0-pp36-pypy36_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ddc4d832a0f0b4c52fff973a0d44b6c99839a9d016fe4e6a1cb8f3eea96479c2"}, + {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3e5ddc44c14042f0844b8cf7d2cd455f6cc80fd7f5eefbe657292cf601d9ad"}, + {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c70e94281588ef053ae8998039610dbd71bc509e4acbc77ab59d7d2937b10698"}, + {file = "Pillow-8.4.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:3862b7256046fcd950618ed22d1d60b842e3a40a48236a5498746f21189afbbc"}, + {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4901622493f88b1a29bd30ec1a2f683782e57c3c16a2dbc7f2595ba01f639df"}, + {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c471a734240653a0ec91dec0996696eea227eafe72a33bd06c92697728046b"}, + {file = "Pillow-8.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:244cf3b97802c34c41905d22810846802a3329ddcb93ccc432870243211c79fc"}, + {file = "Pillow-8.4.0.tar.gz", hash = "sha256:b8e2f83c56e141920c39464b852de3719dfbfb6e3c99a2d8da0edf4fb33176ed"}, +] + +[[package]] +name = "pyparsing" +version = "3.0.6" +description = "Python parsing module" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "pyparsing-3.0.6-py3-none-any.whl", hash = "sha256:04ff808a5b90911829c55c4e26f75fa5ca8a2f5f36aa3a51f68e27033341d3e4"}, + {file = "pyparsing-3.0.6.tar.gz", hash = "sha256:d9bdec0013ef1eb5a84ab39a3b3868911598afa494f5faa038647101504e2b81"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pytz" +version = "2021.3" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "pytz-2021.3-py2.py3-none-any.whl", hash = "sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"}, + {file = "pytz-2021.3.tar.gz", hash = "sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326"}, +] + +[[package]] +name = "regex" +version = "2021.11.10" +description = "Alternative regular expression module, to replace re." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "regex-2021.11.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9345b6f7ee578bad8e475129ed40123d265464c4cfead6c261fd60fc9de00bcf"}, + {file = "regex-2021.11.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:416c5f1a188c91e3eb41e9c8787288e707f7d2ebe66e0a6563af280d9b68478f"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0538c43565ee6e703d3a7c3bdfe4037a5209250e8502c98f20fea6f5fdf2965"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee1227cf08b6716c85504aebc49ac827eb88fcc6e51564f010f11a406c0a667"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6650f16365f1924d6014d2ea770bde8555b4a39dc9576abb95e3cd1ff0263b36"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30ab804ea73972049b7a2a5c62d97687d69b5a60a67adca07eb73a0ddbc9e29f"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:68a067c11463de2a37157930d8b153005085e42bcb7ad9ca562d77ba7d1404e0"}, + {file = "regex-2021.11.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:162abfd74e88001d20cb73ceaffbfe601469923e875caf9118333b1a4aaafdc4"}, + {file = "regex-2021.11.10-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b9ed0b1e5e0759d6b7f8e2f143894b2a7f3edd313f38cf44e1e15d360e11749b"}, + {file = "regex-2021.11.10-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:473e67837f786404570eae33c3b64a4b9635ae9f00145250851a1292f484c063"}, + {file = "regex-2021.11.10-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2fee3ed82a011184807d2127f1733b4f6b2ff6ec7151d83ef3477f3b96a13d03"}, + {file = "regex-2021.11.10-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:d5fd67df77bab0d3f4ea1d7afca9ef15c2ee35dfb348c7b57ffb9782a6e4db6e"}, + {file = "regex-2021.11.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5d408a642a5484b9b4d11dea15a489ea0928c7e410c7525cd892f4d04f2f617b"}, + {file = "regex-2021.11.10-cp310-cp310-win32.whl", hash = "sha256:98ba568e8ae26beb726aeea2273053c717641933836568c2a0278a84987b2a1a"}, + {file = "regex-2021.11.10-cp310-cp310-win_amd64.whl", hash = "sha256:780b48456a0f0ba4d390e8b5f7c661fdd218934388cde1a974010a965e200e12"}, + {file = "regex-2021.11.10-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dba70f30fd81f8ce6d32ddeef37d91c8948e5d5a4c63242d16a2b2df8143aafc"}, + {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1f54b9b4b6c53369f40028d2dd07a8c374583417ee6ec0ea304e710a20f80a0"}, + {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fbb9dc00e39f3e6c0ef48edee202f9520dafb233e8b51b06b8428cfcb92abd30"}, + {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666abff54e474d28ff42756d94544cdfd42e2ee97065857413b72e8a2d6a6345"}, + {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5537f71b6d646f7f5f340562ec4c77b6e1c915f8baae822ea0b7e46c1f09b733"}, + {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2e07c6a26ed4bea91b897ee2b0835c21716d9a469a96c3e878dc5f8c55bb23"}, + {file = "regex-2021.11.10-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca5f18a75e1256ce07494e245cdb146f5a9267d3c702ebf9b65c7f8bd843431e"}, + {file = "regex-2021.11.10-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:74cbeac0451f27d4f50e6e8a8f3a52ca074b5e2da9f7b505c4201a57a8ed6286"}, + {file = "regex-2021.11.10-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:3598893bde43091ee5ca0a6ad20f08a0435e93a69255eeb5f81b85e81e329264"}, + {file = "regex-2021.11.10-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:50a7ddf3d131dc5633dccdb51417e2d1910d25cbcf842115a3a5893509140a3a"}, + {file = "regex-2021.11.10-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:61600a7ca4bcf78a96a68a27c2ae9389763b5b94b63943d5158f2a377e09d29a"}, + {file = "regex-2021.11.10-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:563d5f9354e15e048465061509403f68424fef37d5add3064038c2511c8f5e00"}, + {file = "regex-2021.11.10-cp36-cp36m-win32.whl", hash = "sha256:93a5051fcf5fad72de73b96f07d30bc29665697fb8ecdfbc474f3452c78adcf4"}, + {file = "regex-2021.11.10-cp36-cp36m-win_amd64.whl", hash = "sha256:b483c9d00a565633c87abd0aaf27eb5016de23fed952e054ecc19ce32f6a9e7e"}, + {file = "regex-2021.11.10-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:fff55f3ce50a3ff63ec8e2a8d3dd924f1941b250b0aac3d3d42b687eeff07a8e"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e32d2a2b02ccbef10145df9135751abea1f9f076e67a4e261b05f24b94219e36"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:53db2c6be8a2710b359bfd3d3aa17ba38f8aa72a82309a12ae99d3c0c3dcd74d"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2207ae4f64ad3af399e2d30dde66f0b36ae5c3129b52885f1bffc2f05ec505c8"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5ca078bb666c4a9d1287a379fe617a6dccd18c3e8a7e6c7e1eb8974330c626a"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd33eb9bdcfbabab3459c9ee651d94c842bc8a05fabc95edf4ee0c15a072495e"}, + {file = "regex-2021.11.10-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05b7d6d7e64efe309972adab77fc2af8907bb93217ec60aa9fe12a0dad35874f"}, + {file = "regex-2021.11.10-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:42b50fa6666b0d50c30a990527127334d6b96dd969011e843e726a64011485da"}, + {file = "regex-2021.11.10-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6e1d2cc79e8dae442b3fa4a26c5794428b98f81389af90623ffcc650ce9f6732"}, + {file = "regex-2021.11.10-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:0416f7399e918c4b0e074a0f66e5191077ee2ca32a0f99d4c187a62beb47aa05"}, + {file = "regex-2021.11.10-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:ce298e3d0c65bd03fa65ffcc6db0e2b578e8f626d468db64fdf8457731052942"}, + {file = "regex-2021.11.10-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dc07f021ee80510f3cd3af2cad5b6a3b3a10b057521d9e6aaeb621730d320c5a"}, + {file = "regex-2021.11.10-cp37-cp37m-win32.whl", hash = "sha256:e71255ba42567d34a13c03968736c5d39bb4a97ce98188fafb27ce981115beec"}, + {file = "regex-2021.11.10-cp37-cp37m-win_amd64.whl", hash = "sha256:07856afef5ffcc052e7eccf3213317fbb94e4a5cd8177a2caa69c980657b3cb4"}, + {file = "regex-2021.11.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba05430e819e58544e840a68b03b28b6d328aff2e41579037e8bab7653b37d83"}, + {file = "regex-2021.11.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7f301b11b9d214f83ddaf689181051e7f48905568b0c7017c04c06dfd065e244"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aaa4e0705ef2b73dd8e36eeb4c868f80f8393f5f4d855e94025ce7ad8525f50"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:788aef3549f1924d5c38263104dae7395bf020a42776d5ec5ea2b0d3d85d6646"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f8af619e3be812a2059b212064ea7a640aff0568d972cd1b9e920837469eb3cb"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85bfa6a5413be0ee6c5c4a663668a2cad2cbecdee367630d097d7823041bdeec"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f23222527b307970e383433daec128d769ff778d9b29343fb3496472dc20dabe"}, + {file = "regex-2021.11.10-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:da1a90c1ddb7531b1d5ff1e171b4ee61f6345119be7351104b67ff413843fe94"}, + {file = "regex-2021.11.10-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f5be7805e53dafe94d295399cfbe5227f39995a997f4fd8539bf3cbdc8f47ca8"}, + {file = "regex-2021.11.10-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a955b747d620a50408b7fdf948e04359d6e762ff8a85f5775d907ceced715129"}, + {file = "regex-2021.11.10-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:139a23d1f5d30db2cc6c7fd9c6d6497872a672db22c4ae1910be22d4f4b2068a"}, + {file = "regex-2021.11.10-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ca49e1ab99593438b204e00f3970e7a5f70d045267051dfa6b5f4304fcfa1dbf"}, + {file = "regex-2021.11.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:96fc32c16ea6d60d3ca7f63397bff5c75c5a562f7db6dec7d412f7c4d2e78ec0"}, + {file = "regex-2021.11.10-cp38-cp38-win32.whl", hash = "sha256:0617383e2fe465732af4509e61648b77cbe3aee68b6ac8c0b6fe934db90be5cc"}, + {file = "regex-2021.11.10-cp38-cp38-win_amd64.whl", hash = "sha256:a3feefd5e95871872673b08636f96b61ebef62971eab044f5124fb4dea39919d"}, + {file = "regex-2021.11.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f7f325be2804246a75a4f45c72d4ce80d2443ab815063cdf70ee8fb2ca59ee1b"}, + {file = "regex-2021.11.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:537ca6a3586931b16a85ac38c08cc48f10fc870a5b25e51794c74df843e9966d"}, + {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eef2afb0fd1747f33f1ee3e209bce1ed582d1896b240ccc5e2697e3275f037c7"}, + {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:432bd15d40ed835a51617521d60d0125867f7b88acf653e4ed994a1f8e4995dc"}, + {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b43c2b8a330a490daaef5a47ab114935002b13b3f9dc5da56d5322ff218eeadb"}, + {file = "regex-2021.11.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:962b9a917dd7ceacbe5cd424556914cb0d636001e393b43dc886ba31d2a1e449"}, + {file = "regex-2021.11.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa8c626d6441e2d04b6ee703ef2d1e17608ad44c7cb75258c09dd42bacdfc64b"}, + {file = "regex-2021.11.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3c5fb32cc6077abad3bbf0323067636d93307c9fa93e072771cf9a64d1c0f3ef"}, + {file = "regex-2021.11.10-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:cd410a1cbb2d297c67d8521759ab2ee3f1d66206d2e4328502a487589a2cb21b"}, + {file = "regex-2021.11.10-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e6096b0688e6e14af6a1b10eaad86b4ff17935c49aa774eac7c95a57a4e8c296"}, + {file = "regex-2021.11.10-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:529801a0d58809b60b3531ee804d3e3be4b412c94b5d267daa3de7fadef00f49"}, + {file = "regex-2021.11.10-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0f594b96fe2e0821d026365f72ac7b4f0b487487fb3d4aaf10dd9d97d88a9737"}, + {file = "regex-2021.11.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2409b5c9cef7054dde93a9803156b411b677affc84fca69e908b1cb2c540025d"}, + {file = "regex-2021.11.10-cp39-cp39-win32.whl", hash = "sha256:3b5df18db1fccd66de15aa59c41e4f853b5df7550723d26aa6cb7f40e5d9da5a"}, + {file = "regex-2021.11.10-cp39-cp39-win_amd64.whl", hash = "sha256:83ee89483672b11f8952b158640d0c0ff02dc43d9cb1b70c1564b49abe92ce29"}, + {file = "regex-2021.11.10.tar.gz", hash = "sha256:f341ee2df0999bfdf7a95e448075effe0db212a59387de1a70690e4acb03d4c6"}, +] + +[[package]] +name = "requests" +version = "2.26.0" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +groups = ["main"] +files = [ + {file = "requests-2.26.0-py2.py3-none-any.whl", hash = "sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24"}, + {file = "requests-2.26.0.tar.gz", hash = "sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""} +idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""} +urllib3 = ">=1.21.1,<1.27" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton ; sys_platform == \"win32\" and python_version == \"2.7\""] +use-chardet-on-py3 = ["chardet (>=3.0.2,<5)"] + +[[package]] +name = "scipy" +version = "1.11.1" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = "<3.13,>=3.9" +groups = ["main"] +files = [ + {file = "scipy-1.11.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:aec8c62fbe52914f9cf28d846cf0401dd80ab80788bbab909434eb336ed07c04"}, + {file = "scipy-1.11.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:3b9963798df1d8a52db41a6fc0e6fa65b1c60e85d73da27ae8bb754de4792481"}, + {file = "scipy-1.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e8eb42db36526b130dfbc417609498a6192381abc1975b91e3eb238e0b41c1a"}, + {file = "scipy-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:366a6a937110d80dca4f63b3f5b00cc89d36f678b2d124a01067b154e692bab1"}, + {file = "scipy-1.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:08d957ca82d3535b3b9ba6c8ff355d78fe975271874e2af267cb5add5bd78625"}, + {file = "scipy-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:e866514bc2d660608447b6ba95c8900d591f2865c07cca0aa4f7ff3c4ca70f30"}, + {file = "scipy-1.11.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba94eeef3c9caa4cea7b402a35bb02a5714ee1ee77eb98aca1eed4543beb0f4c"}, + {file = "scipy-1.11.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:512fdc18c65f76dadaca139348e525646d440220d8d05f6d21965b8d4466bccd"}, + {file = "scipy-1.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cce154372f0ebe88556ed06d7b196e9c2e0c13080ecb58d0f35062dc7cc28b47"}, + {file = "scipy-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4bb943010203465ac81efa392e4645265077b4d9e99b66cf3ed33ae12254173"}, + {file = "scipy-1.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:249cfa465c379c9bb2c20123001e151ff5e29b351cbb7f9c91587260602c58d0"}, + {file = "scipy-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:ffb28e3fa31b9c376d0fb1f74c1f13911c8c154a760312fbee87a21eb21efe31"}, + {file = "scipy-1.11.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:39154437654260a52871dfde852adf1b93b1d1bc5dc0ffa70068f16ec0be2624"}, + {file = "scipy-1.11.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:b588311875c58d1acd4ef17c983b9f1ab5391755a47c3d70b6bd503a45bfaf71"}, + {file = "scipy-1.11.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d51565560565a0307ed06fa0ec4c6f21ff094947d4844d6068ed04400c72d0c3"}, + {file = "scipy-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b41a0f322b4eb51b078cb3441e950ad661ede490c3aca66edef66f4b37ab1877"}, + {file = "scipy-1.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:396fae3f8c12ad14c5f3eb40499fd06a6fef8393a6baa352a652ecd51e74e029"}, + {file = "scipy-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:be8c962a821957fdde8c4044efdab7a140c13294997a407eaee777acf63cbf0c"}, + {file = "scipy-1.11.1.tar.gz", hash = "sha256:fb5b492fa035334fd249f0973cc79ecad8b09c604b42a127a677b45a9a3d4289"}, +] + +[package.dependencies] +numpy = ">=1.21.6,<1.28.0" + +[package.extras] +dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] +doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] +test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["main"] +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sniffio" +version = "1.3.0" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, + {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, +] + +[[package]] +name = "sqlparse" +version = "0.4.3" +description = "A non-validating SQL parser." +optional = false +python-versions = ">=3.5" +groups = ["main"] +files = [ + {file = "sqlparse-0.4.3-py3-none-any.whl", hash = "sha256:0323c0ec29cd52bceabc1b4d9d579e311f3e4961b98d174201d5622a23b85e34"}, + {file = "sqlparse-0.4.3.tar.gz", hash = "sha256:69ca804846bb114d2ec380e4360a8a340db83f0ccf3afceeb1404df028f57268"}, +] + +[[package]] +name = "tqdm" +version = "4.62.3" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +groups = ["main"] +files = [ + {file = "tqdm-4.62.3-py2.py3-none-any.whl", hash = "sha256:8dd278a422499cd6b727e6ae4061c40b48fce8b76d1ccbf5d34fca9b7f925b0c"}, + {file = "tqdm-4.62.3.tar.gz", hash = "sha256:d359de7217506c9851b7869f3708d8ee53ed70a1b8edbba4dbcb47442592920d"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["py-make (>=0.1.0)", "twine", "wheel"] +notebook = ["ipywidgets (>=6)"] +telegram = ["requests"] + +[[package]] +name = "typing-extensions" +version = "4.0.0" +description = "Backported and Experimental Type Hints for Python 3.6+" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "typing_extensions-4.0.0-py3-none-any.whl", hash = "sha256:829704698b22e13ec9eaf959122315eabb370b0884400e9818334d8b677023d9"}, + {file = "typing_extensions-4.0.0.tar.gz", hash = "sha256:2cdf80e4e04866a9b3689a51869016d36db0814d84b8d8a568d22781d45d27ed"}, +] + +[[package]] +name = "urllib3" +version = "1.26.7" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +groups = ["main"] +files = [ + {file = "urllib3-1.26.7-py2.py3-none-any.whl", hash = "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"}, + {file = "urllib3-1.26.7.tar.gz", hash = "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece"}, +] + +[package.extras] +brotli = ["brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress ; python_version == \"2.7\"", "pyOpenSSL (>=0.14)"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "yarl" +version = "1.8.2" +description = "Yet another URL library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "yarl-1.8.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bb81f753c815f6b8e2ddd2eef3c855cf7da193b82396ac013c661aaa6cc6b0a5"}, + {file = "yarl-1.8.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:47d49ac96156f0928f002e2424299b2c91d9db73e08c4cd6742923a086f1c863"}, + {file = "yarl-1.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3fc056e35fa6fba63248d93ff6e672c096f95f7836938241ebc8260e062832fe"}, + {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58a3c13d1c3005dbbac5c9f0d3210b60220a65a999b1833aa46bd6677c69b08e"}, + {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10b08293cda921157f1e7c2790999d903b3fd28cd5c208cf8826b3b508026996"}, + {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de986979bbd87272fe557e0a8fcb66fd40ae2ddfe28a8b1ce4eae22681728fef"}, + {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c4fcfa71e2c6a3cb568cf81aadc12768b9995323186a10827beccf5fa23d4f8"}, + {file = "yarl-1.8.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae4d7ff1049f36accde9e1ef7301912a751e5bae0a9d142459646114c70ecba6"}, + {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bf071f797aec5b96abfc735ab97da9fd8f8768b43ce2abd85356a3127909d146"}, + {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:74dece2bfc60f0f70907c34b857ee98f2c6dd0f75185db133770cd67300d505f"}, + {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:df60a94d332158b444301c7f569659c926168e4d4aad2cfbf4bce0e8fb8be826"}, + {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:63243b21c6e28ec2375f932a10ce7eda65139b5b854c0f6b82ed945ba526bff3"}, + {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cfa2bbca929aa742b5084fd4663dd4b87c191c844326fcb21c3afd2d11497f80"}, + {file = "yarl-1.8.2-cp310-cp310-win32.whl", hash = "sha256:b05df9ea7496df11b710081bd90ecc3a3db6adb4fee36f6a411e7bc91a18aa42"}, + {file = "yarl-1.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:24ad1d10c9db1953291f56b5fe76203977f1ed05f82d09ec97acb623a7976574"}, + {file = "yarl-1.8.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2a1fca9588f360036242f379bfea2b8b44cae2721859b1c56d033adfd5893634"}, + {file = "yarl-1.8.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f37db05c6051eff17bc832914fe46869f8849de5b92dc4a3466cd63095d23dfd"}, + {file = "yarl-1.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:77e913b846a6b9c5f767b14dc1e759e5aff05502fe73079f6f4176359d832581"}, + {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0978f29222e649c351b173da2b9b4665ad1feb8d1daa9d971eb90df08702668a"}, + {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:388a45dc77198b2460eac0aca1efd6a7c09e976ee768b0d5109173e521a19daf"}, + {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2305517e332a862ef75be8fad3606ea10108662bc6fe08509d5ca99503ac2aee"}, + {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42430ff511571940d51e75cf42f1e4dbdded477e71c1b7a17f4da76c1da8ea76"}, + {file = "yarl-1.8.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3150078118f62371375e1e69b13b48288e44f6691c1069340081c3fd12c94d5b"}, + {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c15163b6125db87c8f53c98baa5e785782078fbd2dbeaa04c6141935eb6dab7a"}, + {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4d04acba75c72e6eb90745447d69f84e6c9056390f7a9724605ca9c56b4afcc6"}, + {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e7fd20d6576c10306dea2d6a5765f46f0ac5d6f53436217913e952d19237efc4"}, + {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:75c16b2a900b3536dfc7014905a128a2bea8fb01f9ee26d2d7d8db0a08e7cb2c"}, + {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6d88056a04860a98341a0cf53e950e3ac9f4e51d1b6f61a53b0609df342cc8b2"}, + {file = "yarl-1.8.2-cp311-cp311-win32.whl", hash = "sha256:fb742dcdd5eec9f26b61224c23baea46c9055cf16f62475e11b9b15dfd5c117b"}, + {file = "yarl-1.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:8c46d3d89902c393a1d1e243ac847e0442d0196bbd81aecc94fcebbc2fd5857c"}, + {file = "yarl-1.8.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ceff9722e0df2e0a9e8a79c610842004fa54e5b309fe6d218e47cd52f791d7ef"}, + {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f6b4aca43b602ba0f1459de647af954769919c4714706be36af670a5f44c9c1"}, + {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1684a9bd9077e922300ecd48003ddae7a7474e0412bea38d4631443a91d61077"}, + {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ebb78745273e51b9832ef90c0898501006670d6e059f2cdb0e999494eb1450c2"}, + {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3adeef150d528ded2a8e734ebf9ae2e658f4c49bf413f5f157a470e17a4a2e89"}, + {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57a7c87927a468e5a1dc60c17caf9597161d66457a34273ab1760219953f7f4c"}, + {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:efff27bd8cbe1f9bd127e7894942ccc20c857aa8b5a0327874f30201e5ce83d0"}, + {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a783cd344113cb88c5ff7ca32f1f16532a6f2142185147822187913eb989f739"}, + {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:705227dccbe96ab02c7cb2c43e1228e2826e7ead880bb19ec94ef279e9555b5b"}, + {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:34c09b43bd538bf6c4b891ecce94b6fa4f1f10663a8d4ca589a079a5018f6ed7"}, + {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a48f4f7fea9a51098b02209d90297ac324241bf37ff6be6d2b0149ab2bd51b37"}, + {file = "yarl-1.8.2-cp37-cp37m-win32.whl", hash = "sha256:0414fd91ce0b763d4eadb4456795b307a71524dbacd015c657bb2a39db2eab89"}, + {file = "yarl-1.8.2-cp37-cp37m-win_amd64.whl", hash = "sha256:d881d152ae0007809c2c02e22aa534e702f12071e6b285e90945aa3c376463c5"}, + {file = "yarl-1.8.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5df5e3d04101c1e5c3b1d69710b0574171cc02fddc4b23d1b2813e75f35a30b1"}, + {file = "yarl-1.8.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7a66c506ec67eb3159eea5096acd05f5e788ceec7b96087d30c7d2865a243918"}, + {file = "yarl-1.8.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2b4fa2606adf392051d990c3b3877d768771adc3faf2e117b9de7eb977741229"}, + {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e21fb44e1eff06dd6ef971d4bdc611807d6bd3691223d9c01a18cec3677939e"}, + {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93202666046d9edadfe9f2e7bf5e0782ea0d497b6d63da322e541665d65a044e"}, + {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fc77086ce244453e074e445104f0ecb27530d6fd3a46698e33f6c38951d5a0f1"}, + {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dd68a92cab699a233641f5929a40f02a4ede8c009068ca8aa1fe87b8c20ae3"}, + {file = "yarl-1.8.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b372aad2b5f81db66ee7ec085cbad72c4da660d994e8e590c997e9b01e44901"}, + {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e6f3515aafe0209dd17fb9bdd3b4e892963370b3de781f53e1746a521fb39fc0"}, + {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dfef7350ee369197106805e193d420b75467b6cceac646ea5ed3049fcc950a05"}, + {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:728be34f70a190566d20aa13dc1f01dc44b6aa74580e10a3fb159691bc76909d"}, + {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ff205b58dc2929191f68162633d5e10e8044398d7a45265f90a0f1d51f85f72c"}, + {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baf211dcad448a87a0d9047dc8282d7de59473ade7d7fdf22150b1d23859f946"}, + {file = "yarl-1.8.2-cp38-cp38-win32.whl", hash = "sha256:272b4f1599f1b621bf2aabe4e5b54f39a933971f4e7c9aa311d6d7dc06965165"}, + {file = "yarl-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:326dd1d3caf910cd26a26ccbfb84c03b608ba32499b5d6eeb09252c920bcbe4f"}, + {file = "yarl-1.8.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f8ca8ad414c85bbc50f49c0a106f951613dfa5f948ab69c10ce9b128d368baf8"}, + {file = "yarl-1.8.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:418857f837347e8aaef682679f41e36c24250097f9e2f315d39bae3a99a34cbf"}, + {file = "yarl-1.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ae0eec05ab49e91a78700761777f284c2df119376e391db42c38ab46fd662b77"}, + {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:009a028127e0a1755c38b03244c0bea9d5565630db9c4cf9572496e947137a87"}, + {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3edac5d74bb3209c418805bda77f973117836e1de7c000e9755e572c1f7850d0"}, + {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da65c3f263729e47351261351b8679c6429151ef9649bba08ef2528ff2c423b2"}, + {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef8fb25e52663a1c85d608f6dd72e19bd390e2ecaf29c17fb08f730226e3a08"}, + {file = "yarl-1.8.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcd7bb1e5c45274af9a1dd7494d3c52b2be5e6bd8d7e49c612705fd45420b12d"}, + {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44ceac0450e648de86da8e42674f9b7077d763ea80c8ceb9d1c3e41f0f0a9951"}, + {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:97209cc91189b48e7cfe777237c04af8e7cc51eb369004e061809bcdf4e55220"}, + {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:48dd18adcf98ea9cd721a25313aef49d70d413a999d7d89df44f469edfb38a06"}, + {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e59399dda559688461762800d7fb34d9e8a6a7444fd76ec33220a926c8be1516"}, + {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d617c241c8c3ad5c4e78a08429fa49e4b04bedfc507b34b4d8dceb83b4af3588"}, + {file = "yarl-1.8.2-cp39-cp39-win32.whl", hash = "sha256:cb6d48d80a41f68de41212f3dfd1a9d9898d7841c8f7ce6696cf2fd9cb57ef83"}, + {file = "yarl-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:6604711362f2dbf7160df21c416f81fac0de6dbcf0b5445a2ef25478ecc4c778"}, + {file = "yarl-1.8.2.tar.gz", hash = "sha256:49d43402c6e3013ad0978602bf6bf5328535c48d192304b91b97a3c6790b1562"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + +[metadata] +lock-version = "2.1" +python-versions = ">=3.9, <3.13" +content-hash = "26ce3a5eaf88e22535f2e73c4df02ae47187105212ebaa348b70b3007f40aae6" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..8f57aa6a31 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,52 @@ +[tool.poetry] +name = "generative-agents" +version = "0.1.0" +description = "Generative Agents: Interactive Simulacra of Human Behavior" +authors = ["Stanford HCI Group "] +readme = "README.md" + +[tool.poetry.dependencies] +python = ">=3.9, <3.13" +aiohttp = "3.8.3" +aiosignal = "1.3.1" +asgiref = "3.5.2" +async-generator = "1.10" +async-timeout = "4.0.2" +attrs = "22.2.0" +certifi = "2021.10.8" +charset-normalizer = "2.0.12" +click = "8.0.3" +django = "2.2" +frozenlist = "1.3.3" +multidict = "6.0.4" +numpy = "1.25.2" +openai = "0.27.0" +pillow = "8.4.0" +python-dateutil = "2.8.2" +pytz = "2021.3" +regex = "2021.11.10" +requests = "2.26.0" +six = "1.16.0" +sqlparse = "0.4.3" +tqdm = "4.62.3" +typing-extensions = "4.0.0" +urllib3 = "1.26.7" +yarl = "1.8.2" +idna = "3.3" +exceptiongroup = "1.1.0" +sniffio = "1.3.0" +h11 = "0.14.0" +packaging = "23.0" +pyparsing = "3.0.6" +scipy = "1.11.1" +boto = "2.49.0" +botocore = "1.29.43" +cycler = "0.11.0" +dj-database-url = "0.5.0" + +[tool.poetry.group.dev.dependencies] +# Development dependencies will be added here + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/reverie/backend_server/global_methods.py b/reverie/backend_server/global_methods.py index 6bdcefbb12..283188d78e 100644 --- a/reverie/backend_server/global_methods.py +++ b/reverie/backend_server/global_methods.py @@ -1,8 +1,8 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: global_methods.py -Description: Contains functions used throughout my projects. +文件: global_methods.py +描述: 包含在我的各个项目中使用的函数。 """ import random import string @@ -20,23 +20,18 @@ def create_folder_if_not_there(curr_path): """ - Checks if a folder in the curr_path exists. If it does not exist, creates - the folder. - Note that if the curr_path designates a file location, it will operate on - the folder that contains the file. But the function also works even if the - path designates to just a folder. - Args: - curr_list: list to write. The list comes in the following form: - [['key1', 'val1-1', 'val1-2'...], - ['key2', 'val2-1', 'val2-2'...],] - outfile: name of the csv file to write - RETURNS: - True: if a new folder is created - False: if a new folder is not created + 检查 curr_path 中的文件夹是否存在。如果不存在,则创建该文件夹。 + 请注意,如果 curr_path 指定的是文件位置,则它将操作包含该文件的文件夹。 + 但即使用于仅指定文件夹的路径,此函数也同样有效。 + 参数: + curr_path: 当前路径 + 返回: + True: 如果创建了新文件夹 + False: 如果未创建新文件夹 """ outfolder_name = curr_path.split("/") if len(outfolder_name) != 1: - # This checks if the curr path is a file or a folder. + # 这会检查当前路径是文件还是文件夹。 if "." in outfolder_name[-1]: outfolder_name = outfolder_name[:-1] @@ -50,15 +45,15 @@ def create_folder_if_not_there(curr_path): def write_list_of_list_to_csv(curr_list_of_list, outfile): """ - Writes a list of list to csv. - Unlike write_list_to_csv_line, it writes the entire csv in one shot. - ARGS: - curr_list_of_list: list to write. The list comes in the following form: + 将列表的列表写入 CSV 文件。 + 与 write_list_to_csv_line 不同,它一次性写入整个 CSV 文件。 + 参数: + curr_list_of_list: 要写入的列表。列表格式如下: [['key1', 'val1-1', 'val1-2'...], ['key2', 'val2-1', 'val2-2'...],] - outfile: name of the csv file to write - RETURNS: - None + outfile: 要写入的 CSV 文件的名称 + 返回: + 无 """ create_folder_if_not_there(outfile) with open(outfile, "w") as f: @@ -68,21 +63,21 @@ def write_list_of_list_to_csv(curr_list_of_list, outfile): def write_list_to_csv_line(line_list, outfile): """ - Writes one line to a csv file. - Unlike write_list_of_list_to_csv, this opens an existing outfile and then - appends a line to that file. - This also works if the file does not exist already. - ARGS: - curr_list: list to write. The list comes in the following form: + 将一行写入 CSV 文件。 + 与 write_list_of_list_to_csv 不同,此函数会打开一个现有的输出文件, + 然后向该文件追加一行。 + 如果文件尚不存在,此函数也同样有效。 + 参数: + line_list: 要写入的列表。列表格式如下: ['key1', 'val1-1', 'val1-2'...] - Importantly, this is NOT a list of list. - outfile: name of the csv file to write - RETURNS: - None + 重要的是,这不是列表的列表。 + outfile: 要写入的 CSV 文件的名称 + 返回: + 无 """ create_folder_if_not_there(outfile) - # Opening the file first so we can write incrementally as we progress + # 首先打开文件,以便我们可以随着进度增量写入 curr_file = open(outfile, 'a',) csvfile_1 = csv.writer(curr_file) csvfile_1.writerow(line_list) @@ -91,12 +86,13 @@ def write_list_to_csv_line(line_list, outfile): def read_file_to_list(curr_file, header=False, strip_trail=True): """ - Reads in a csv file to a list of list. If header is True, it returns a - tuple with (header row, all rows) - ARGS: - curr_file: path to the current csv file. - RETURNS: - List of list where the component lists are the rows of the file. + 将 CSV 文件读入列表的列表。如果 header 为 True,则返回一个包含 (标题行, 所有行) 的元组。 + 参数: + curr_file: 当前 CSV 文件的路径。 + header: 布尔值,指示文件是否包含标题行 (默认为 False)。 + strip_trail: 布尔值,指示是否去除每行末尾的空白字符 (默认为 True)。 + 返回: + 列表的列表,其中内部列表是文件的行。如果 header 为 True,则返回 (标题行, 所有其他行)。 """ if not header: analysis_list = [] @@ -120,11 +116,12 @@ def read_file_to_list(curr_file, header=False, strip_trail=True): def read_file_to_set(curr_file, col=0): """ - Reads in a "single column" of a csv file to a set. - ARGS: - curr_file: path to the current csv file. - RETURNS: - Set with all items in a single column of a csv file. + 将 CSV 文件的“单列”读入一个集合。 + 参数: + curr_file: 当前 CSV 文件的路径。 + col: 要读取的列的索引 (默认为 0)。 + 返回: + 包含 CSV 文件单列中所有项的集合。 """ analysis_set = set() with open(curr_file) as f_analysis_file: @@ -136,12 +133,12 @@ def read_file_to_set(curr_file, col=0): def get_row_len(curr_file): """ - Get the number of rows in a csv file - ARGS: - curr_file: path to the current csv file. - RETURNS: - The number of rows - False if the file does not exist + 获取 CSV 文件中的行数。 + 参数: + curr_file: 当前 CSV 文件的路径。 + 返回: + 行数。 + 如果文件不存在,则返回 False。 """ try: analysis_set = set() @@ -156,12 +153,12 @@ def get_row_len(curr_file): def check_if_file_exists(curr_file): """ - Checks if a file exists - ARGS: - curr_file: path to the current csv file. - RETURNS: - True if the file exists - False if the file does not exist + 检查文件是否存在。 + 参数: + curr_file: 当前文件的路径。 + 返回: + 如果文件存在,则为 True。 + 如果文件不存在,则为 False。 """ try: with open(curr_file) as f_analysis_file: pass @@ -172,13 +169,12 @@ def check_if_file_exists(curr_file): def find_filenames(path_to_dir, suffix=".csv"): """ - Given a directory, find all files that ends with the provided suffix and - returns their paths. - ARGS: - path_to_dir: Path to the current directory - suffix: The target suffix. - RETURNS: - A list of paths to all files in the directory. + 给定一个目录,查找所有以提供的后缀结尾的文件,并返回它们的路径。 + 参数: + path_to_dir: 当前目录的路径。 + suffix: 目标后缀 (默认为 ".csv")。 + 返回: + 目录中所有匹配文件的路径列表。 """ filenames = listdir(path_to_dir) return [ path_to_dir+"/"+filename @@ -187,22 +183,22 @@ def find_filenames(path_to_dir, suffix=".csv"): def average(list_of_val): """ - Finds the average of the numbers in a list. - ARGS: - list_of_val: a list of numeric values - RETURNS: - The average of the values + 计算列表中数字的平均值。 + 参数: + list_of_val: 一个数值列表。 + 返回: + 这些值的平均值。 """ return sum(list_of_val)/float(len(list_of_val)) def std(list_of_val): """ - Finds the std of the numbers in a list. - ARGS: - list_of_val: a list of numeric values - RETURNS: - The std of the values + 计算列表中数字的标准差。 + 参数: + list_of_val: 一个数值列表。 + 返回: + 这些值的标准差。 """ std = numpy.std(list_of_val) return std @@ -210,16 +206,16 @@ def std(list_of_val): def copyanything(src, dst): """ - Copy over everything in the src folder to dst folder. - ARGS: - src: address of the source folder - dst: address of the destination folder - RETURNS: - None + 将源文件夹中的所有内容复制到目标文件夹。 + 参数: + src: 源文件夹的地址。 + dst: 目标文件夹的地址。 + 返回: + 无 """ try: shutil.copytree(src, dst) - except OSError as exc: # python >2.5 + except OSError as exc: # python >2.5 版本 if exc.errno in (errno.ENOTDIR, errno.EINVAL): shutil.copy(src, dst) else: raise diff --git a/reverie/backend_server/maze.py b/reverie/backend_server/maze.py index 7d286e31ea..7443895884 100644 --- a/reverie/backend_server/maze.py +++ b/reverie/backend_server/maze.py @@ -1,9 +1,8 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: maze.py -Description: Defines the Maze class, which represents the map of the simulated -world in a 2-dimensional matrix. +文件: maze.py +描述: 定义了 Maze 类,该类以二维矩阵表示模拟世界的地图。 """ import json import numpy @@ -17,34 +16,29 @@ class Maze: def __init__(self, maze_name): - # READING IN THE BASIC META INFORMATION ABOUT THE MAP + # 读取地图的基本元信息 self.maze_name = maze_name - # Reading in the meta information about the world. If you want tp see the - # example variables, check out the maze_meta_info.json file. + # 读取关于世界的元信息。如果你想查看示例变量,请查阅 maze_meta_info.json 文件。 meta_info = json.load(open(f"{env_matrix}/maze_meta_info.json")) - # and denote the number of tiles make up the - # height and width of the map. + # 表示构成地图高度和宽度的瓦片数量。 self.maze_width = int(meta_info["maze_width"]) self.maze_height = int(meta_info["maze_height"]) - # denotes the pixel height/width of a tile. + # 表示一个瓦片的像素高度/宽度。 self.sq_tile_size = int(meta_info["sq_tile_size"]) - # is a string description of any relevant special - # constraints the world might have. - # e.g., "planning to stay at home all day and never go out of her home" + # 是对世界可能具有的任何相关特殊约束的字符串描述。 + # 例如:"planning to stay at home all day and never go out of her home" (计划整天呆在家里,永不迈出家门) self.special_constraint = meta_info["special_constraint"] - # READING IN SPECIAL BLOCKS - # Special blocks are those that are colored in the Tiled map. + # 读取特殊区块 + # 特殊区块是指在 Tiled 地图中着色的那些区块。 - # Here is an example row for the arena block file: - # e.g., "25335, Double Studio, Studio, Common Room" - # And here is another example row for the game object block file: - # e.g, "25331, Double Studio, Studio, Bedroom 2, Painting" + # 这是竞技场区块文件的示例行: + # 例如:"25335, Double Studio, Studio, Common Room" + # 这是游戏对象区块文件的另一个示例行: + # 例如:"25331, Double Studio, Studio, Bedroom 2, Painting" - # Notice that the first element here is the color marker digit from the - # Tiled export. Then we basically have the block path: - # World, Sector, Arena, Game Object -- again, these paths need to be - # unique within an instance of Reverie. + # 注意,这里的第一个元素是来自 Tiled 导出的颜色标记数字。然后我们基本上就有了区块路径: + # 世界, 区域, 竞技场, 游戏对象 —— 再次强调,这些路径在 Reverie 的一个实例中必须是唯一的。 blocks_folder = f"{env_matrix}/special_blocks" _wb = blocks_folder + "/world_blocks.csv" @@ -71,9 +65,8 @@ def __init__(self, maze_name): slb_dict = dict() for i in slb_rows: slb_dict[i[0]] = i[-1] - # [SECTION 3] Reading in the matrices - # This is your typical two dimensional matrices. It's made up of 0s and - # the number that represents the color block from the blocks folder. + # [第 3 节] 读取矩阵 + # 这是典型的二维矩阵。它由 0 和代表区块文件夹中颜色区块的数字组成。 maze_folder = f"{env_matrix}/maze" _cm = maze_folder + "/collision_maze.csv" @@ -87,15 +80,12 @@ def __init__(self, maze_name): _slm = maze_folder + "/spawning_location_maze.csv" spawning_location_maze_raw = read_file_to_list(_slm, header=False)[0] - # Loading the maze. The mazes are taken directly from the json exports of - # Tiled maps. They should be in csv format. - # Importantly, they are "not" in a 2-d matrix format -- they are single - # row matrices with the length of width x height of the maze. So we need - # to convert here. - # We can do this all at once since the dimension of all these matrices are - # identical (e.g., 70 x 40). - # example format: [['0', '0', ... '25309', '0',...], ['0',...]...] - # 25309 is the collision bar number right now. + # 加载迷宫。迷宫直接取自 Tiled 地图的 json 导出文件。它们应该是 csv 格式。 + # 重要的是,它们“不是”二维矩阵格式——它们是单行矩阵, + # 长度为迷宫的宽度 x 高度。所以我们需要在这里进行转换。 + # 我们可以一次性完成所有这些操作,因为所有这些矩阵的维度都相同(例如,70 x 40)。 + # 示例格式: [['0', '0', ... '25309', '0',...], ['0',...]...] + # 25309 当前是碰撞条的编号。 self.collision_maze = [] sector_maze = [] arena_maze = [] @@ -109,16 +99,15 @@ def __init__(self, maze_name): game_object_maze += [game_object_maze_raw[i:i+tw]] spawning_location_maze += [spawning_location_maze_raw[i:i+tw]] - # Once we are done loading in the maze, we now set up self.tiles. This is - # a matrix accessed by row:col where each access point is a dictionary - # that contains all the things that are taking place in that tile. - # More specifically, it contains information about its "world," "sector," - # "arena," "game_object," "spawning_location," as well as whether it is a - # collision block, and a set of all events taking place in it. - # e.g., self.tiles[32][59] = {'world': 'double studio', + # 加载完迷宫后,我们现在设置 self.tiles。这是一个通过 行:列 访问的矩阵, + # 其中每个访问点都是一个字典,包含该瓦片中发生的所有事情。 + # 更具体地说,它包含有关其 "world" (世界)、"sector" (区域)、 + # "arena" (竞技场)、"game_object" (游戏对象)、"spawning_location" (出生点位置) 的信息, + # 以及它是否是碰撞块,和其中发生的所有事件的集合。 + # 例如,self.tiles[32][59] = {'world': 'double studio', # 'sector': '', 'arena': '', 'game_object': '', # 'spawning_location': '', 'collision': False, 'events': set()} - # e.g., self.tiles[9][58] = {'world': 'double studio', + # 例如,self.tiles[9][58] = {'world': 'double studio', # 'sector': 'double studio', 'arena': 'bedroom 2', # 'game_object': 'bed', 'spawning_location': 'bedroom-2-a', # 'collision': False, @@ -148,15 +137,14 @@ def __init__(self, maze_name): tile_details["spawning_location"] = slb_dict[spawning_location_maze[i][j]] tile_details["collision"] = False - if self.collision_maze[i][j] != "0": + if self.collision_maze[i][j] != "0": # "0" is a data value tile_details["collision"] = True tile_details["events"] = set() row += [tile_details] self.tiles += [row] - # Each game object occupies an event in the tile. We are setting up the - # default event value here. + # 每个游戏对象在瓦片中占据一个事件。我们在这里设置默认事件值。 for i in range(self.maze_height): for j in range(self.maze_width): if self.tiles[i][j]["game_object"]: @@ -167,11 +155,10 @@ def __init__(self, maze_name): go_event = (object_name, None, None, None) self.tiles[i][j]["events"].add(go_event) - # Reverse tile access. - # -- given a string address, we return a set of all - # tile coordinates belonging to that address (this is opposite of - # self.tiles that give you the string address given a coordinate). This is - # an optimization component for finding paths for the personas' movement. + # 反向瓦片访问。 + # -- 给定一个字符串地址,我们返回属于该地址的所有瓦片坐标集合 + # (这与 self.tiles 相反,后者是给定坐标返回字符串地址)。 + # 这是用于查找角色移动路径的优化组件。 # self.address_tiles['bedroom-2-a'] == {(58, 9)} # self.address_tiles['double studio:recreation:pool table'] # == {(29, 14), (31, 11), (30, 14), (32, 11), ...}, @@ -207,16 +194,14 @@ def __init__(self, maze_name): def turn_coordinate_to_tile(self, px_coordinate): """ - Turns a pixel coordinate to a tile coordinate. - - INPUT - px_coordinate: The pixel coordinate of our interest. Comes in the x, y - format. - OUTPUT - tile coordinate (x, y): The tile coordinate that corresponds to the - pixel coordinate. - EXAMPLE OUTPUT - Given (1600, 384), outputs (50, 12) + 将像素坐标转换为瓦片坐标。 + + 输入 + px_coordinate: 我们感兴趣的像素坐标。格式为 x, y。 + 输出 + 瓦片坐标 (x, y): 对应于像素坐标的瓦片坐标。 + 输出示例 + 给定 (1600, 384),输出 (50, 12) """ x = math.ceil(px_coordinate[0]/self.sq_tile_size) y = math.ceil(px_coordinate[1]/self.sq_tile_size) @@ -225,15 +210,14 @@ def turn_coordinate_to_tile(self, px_coordinate): def access_tile(self, tile): """ - Returns the tiles details dictionary that is stored in self.tiles of the - designated x, y location. - - INPUT - tile: The tile coordinate of our interest in (x, y) form. - OUTPUT - The tile detail dictionary for the designated tile. - EXAMPLE OUTPUT - Given (58, 9), + 返回存储在 self.tiles 中指定 x, y 位置的瓦片详细信息字典。 + + 输入 + tile: 我们感兴趣的瓦片坐标,格式为 (x, y)。 + 输出 + 指定瓦片的详细信息字典。 + 输出示例 + 给定 (58, 9), self.tiles[9][58] = {'world': 'double studio', 'sector': 'double studio', 'arena': 'bedroom 2', 'game_object': 'bed', 'spawning_location': 'bedroom-2-a', @@ -248,16 +232,15 @@ def access_tile(self, tile): def get_tile_path(self, tile, level): """ - Get the tile string address given its coordinate. You designate the level - by giving it a string level description. - - INPUT: - tile: The tile coordinate of our interest in (x, y) form. - level: world, sector, arena, or game object - OUTPUT - The string address for the tile. - EXAMPLE OUTPUT - Given tile=(58, 9), and level=arena, + 根据坐标获取瓦片的字符串地址。通过提供字符串级别的描述来指定级别。 + + 输入: + tile: 我们感兴趣的瓦片坐标,格式为 (x, y)。 + level: world (世界), sector (区域), arena (竞技场), 或 game object (游戏对象) + 输出 + 瓦片的字符串地址。 + 输出示例 + 给定 tile=(58, 9) 且 level=arena, "double studio:double studio:bedroom 2" """ x = tile[0] @@ -285,21 +268,20 @@ def get_tile_path(self, tile, level): def get_nearby_tiles(self, tile, vision_r): """ - Given the current tile and vision_r, return a list of tiles that are - within the radius. Note that this implementation looks at a square - boundary when determining what is within the radius. - i.e., for vision_r, returns x's. + 给定当前瓦片和 vision_r (视觉半径),返回半径范围内的瓦片列表。 + 请注意,此实现在确定半径范围时查看的是方形边界。 + 即,对于 vision_r,返回 x 标记的瓦片。 x x x x x x x x x x x x P x x x x x x x x x x x x - INPUT: - tile: The tile coordinate of our interest in (x, y) form. - vision_r: The radius of the persona's vision. - OUTPUT: - nearby_tiles: a list of tiles that are within the radius. + 输入: + tile: 我们感兴趣的瓦片坐标,格式为 (x, y)。 + vision_r: 角色的视觉半径。 + 输出: + nearby_tiles: 半径范围内的瓦片列表。 """ left_end = 0 if tile[0] - vision_r > left_end: @@ -326,30 +308,28 @@ def get_nearby_tiles(self, tile, vision_r): def add_event_from_tile(self, curr_event, tile): """ - Add an event triple to a tile. - - INPUT: - curr_event: Current event triple. - e.g., ('double studio:double studio:bedroom 2:bed', None, - None) - tile: The tile coordinate of our interest in (x, y) form. - OUPUT: - None + 将事件三元组添加到瓦片。 + + 输入: + curr_event: 当前事件三元组。 + 例如:('double studio:double studio:bedroom 2:bed', None, None) + tile: 我们感兴趣的瓦片坐标,格式为 (x, y)。 + 输出: + 无 """ self.tiles[tile[1]][tile[0]]["events"].add(curr_event) def remove_event_from_tile(self, curr_event, tile): """ - Remove an event triple from a tile. - - INPUT: - curr_event: Current event triple. - e.g., ('double studio:double studio:bedroom 2:bed', None, - None) - tile: The tile coordinate of our interest in (x, y) form. - OUPUT: - None + 从瓦片中移除事件三元组。 + + 输入: + curr_event: 当前事件三元组。 + 例如:('double studio:double studio:bedroom 2:bed', None, None) + tile: 我们感兴趣的瓦片坐标,格式为 (x, y)。 + 输出: + 无 """ curr_tile_ev_cp = self.tiles[tile[1]][tile[0]]["events"].copy() for event in curr_tile_ev_cp: @@ -368,13 +348,13 @@ def turn_event_from_tile_idle(self, curr_event, tile): def remove_subject_events_from_tile(self, subject, tile): """ - Remove an event triple that has the input subject from a tile. + 从瓦片中移除具有输入主体的事件三元组。 - INPUT: - subject: "Isabella Rodriguez" - tile: The tile coordinate of our interest in (x, y) form. - OUPUT: - None + 输入: + subject: "Isabella Rodriguez" (伊莎贝拉·罗德里格斯) + tile: 我们感兴趣的瓦片坐标,格式为 (x, y)。 + 输出: + 无 """ curr_tile_ev_cp = self.tiles[tile[1]][tile[0]]["events"].copy() for event in curr_tile_ev_cp: diff --git a/reverie/backend_server/persona/cognitive_modules/converse.py b/reverie/backend_server/persona/cognitive_modules/converse.py index 183ded39da..42b5639aa1 100644 --- a/reverie/backend_server/persona/cognitive_modules/converse.py +++ b/reverie/backend_server/persona/cognitive_modules/converse.py @@ -1,8 +1,8 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: converse.py -Description: An extra cognitive module for generating conversations. +文件: converse.py +描述: 一个用于生成对话的额外认知模块。 """ import math import sys @@ -74,15 +74,13 @@ def generate_agent_chat(maze, def agent_chat_v1(maze, init_persona, target_persona): - # Chat version optimized for speed via batch generation + # 通过批量生成优化聊天速度的版本 curr_context = (f"{init_persona.scratch.name} " + - f"was {init_persona.scratch.act_description} " + - f"when {init_persona.scratch.name} " + - f"saw {target_persona.scratch.name} " + - f"in the middle of {target_persona.scratch.act_description}.\n") + f"正 {init_persona.scratch.act_description} " + + f"时,看到了 {target_persona.scratch.name} " + + f"正 {target_persona.scratch.act_description}。\n") curr_context += (f"{init_persona.scratch.name} " + - f"is thinking of initating a conversation with " + - f"{target_persona.scratch.name}.") + f"正在考虑与 {target_persona.scratch.name} 发起对话。") summarized_ideas = [] part_pairs = [(init_persona, target_persona), @@ -104,15 +102,13 @@ def agent_chat_v1(maze, init_persona, target_persona): def generate_one_utterance(maze, init_persona, target_persona, retrieved, curr_chat): - # Chat version optimized for speed via batch generation + # 通过批量生成优化聊天速度的版本 curr_context = (f"{init_persona.scratch.name} " + - f"was {init_persona.scratch.act_description} " + - f"when {init_persona.scratch.name} " + - f"saw {target_persona.scratch.name} " + - f"in the middle of {target_persona.scratch.act_description}.\n") + f"正 {init_persona.scratch.act_description} " + + f"时,看到了 {target_persona.scratch.name} " + + f"正 {target_persona.scratch.act_description}。\n") curr_context += (f"{init_persona.scratch.name} " + - f"is initiating a conversation with " + - f"{target_persona.scratch.name}.") + f"正在与 {target_persona.scratch.name} 发起对话。") print ("July 23 5") x = run_gpt_generate_iterative_chat_utt(maze, init_persona, target_persona, retrieved, curr_context, curr_chat)[0] @@ -125,16 +121,16 @@ def generate_one_utterance(maze, init_persona, target_persona, retrieved, curr_c def agent_chat_v2(maze, init_persona, target_persona): curr_chat = [] - print ("July 23") + print ("July 23") # DEBUG for i in range(8): focal_points = [f"{target_persona.scratch.name}"] retrieved = new_retrieve(init_persona, focal_points, 50) relationship = generate_summarize_agent_relationship(init_persona, target_persona, retrieved) - print ("-------- relationshopadsjfhkalsdjf", relationship) + print ("-------- relationshopadsjfhkalsdjf", relationship) # DEBUG last_chat = "" - for i in curr_chat[-4:]: - last_chat += ": ".join(i) + "\n" + for i_chat in curr_chat[-4:]: # Renamed 'i' to 'i_chat' to avoid conflict with outer loop variable + last_chat += ": ".join(i_chat) + "\n" if last_chat: focal_points = [f"{relationship}", f"{target_persona.scratch.name} is {target_persona.scratch.act_description}", @@ -153,10 +149,10 @@ def agent_chat_v2(maze, init_persona, target_persona): focal_points = [f"{init_persona.scratch.name}"] retrieved = new_retrieve(target_persona, focal_points, 50) relationship = generate_summarize_agent_relationship(target_persona, init_persona, retrieved) - print ("-------- relationshopadsjfhkalsdjf", relationship) + print ("-------- relationshopadsjfhkalsdjf", relationship) # DEBUG last_chat = "" - for i in curr_chat[-4:]: - last_chat += ": ".join(i) + "\n" + for i_chat in curr_chat[-4:]: # Renamed 'i' to 'i_chat' + last_chat += ": ".join(i_chat) + "\n" if last_chat: focal_points = [f"{relationship}", f"{init_persona.scratch.name} is {init_persona.scratch.act_description}", @@ -171,10 +167,10 @@ def agent_chat_v2(maze, init_persona, target_persona): if end: break - print ("July 23 PU") + print ("July 23 PU") # DEBUG for row in curr_chat: - print (row) - print ("July 23 FIN") + print (row) # DEBUG + print ("July 23 FIN") # DEBUG return curr_chat @@ -192,7 +188,7 @@ def generate_summarize_ideas(persona, nodes, question): def generate_next_line(persona, interlocutor_desc, curr_convo, summarized_idea): - # Original chat -- line by line generation + # 原始聊天 --逐行生成 prev_convo = "" for row in curr_convo: prev_convo += f'{row[0]}: {row[1]}\n' @@ -209,14 +205,14 @@ def generate_inner_thought(persona, whisper): return inner_thought def generate_action_event_triple(act_desp, persona): - """TODO - - INPUT: - act_desp: the description of the action (e.g., "sleeping") - persona: The Persona class instance - OUTPUT: - a string of emoji that translates action description. - EXAMPLE OUTPUT: + """待办 + + 输入: + act_desp: 动作的描述 (例如,“正在睡觉”) + persona: Persona 类实例 + 输出: + 一个用于翻译动作描述的表情符号字符串。 + 输出示例: "🧈🍞" """ if debug: print ("GNS FUNCTION: ") @@ -226,20 +222,19 @@ def generate_action_event_triple(act_desp, persona): def generate_poig_score(persona, event_type, description): if debug: print ("GNS FUNCTION: ") - if "is idle" in description: + if "处于空闲状态" in description: return 1 if event_type == "event" or event_type == "thought": return run_gpt_prompt_event_poignancy(persona, description)[0] elif event_type == "chat": return run_gpt_prompt_chat_poignancy(persona, - persona.scratch.act_description)[0] - + persona.scratch.act_description)[0] # This description is likely agent's own action, not user chat. def load_history_via_whisper(personas, whispers): for count, row in enumerate(whispers): persona = personas[row[0]] - whisper = row[1] + whisper = row[1] # This is the user's whisper text. thought = generate_inner_thought(persona, whisper) @@ -247,7 +242,8 @@ def load_history_via_whisper(personas, whispers): expiration = persona.scratch.curr_time + datetime.timedelta(days=30) s, p, o = generate_action_event_triple(thought, persona) keywords = set([s, p, o]) - thought_poignancy = generate_poig_score(persona, "event", whisper) + # Poignancy score is based on the user's whisper, not the generated inner thought. + thought_poignancy = generate_poig_score(persona, "event", whisper) thought_embedding_pair = (thought, get_embedding(thought)) persona.a_mem.add_thought(created, expiration, s, p, o, thought, keywords, thought_poignancy, @@ -257,15 +253,15 @@ def load_history_via_whisper(personas, whispers): def open_convo_session(persona, convo_mode): if convo_mode == "analysis": curr_convo = [] - interlocutor_desc = "Interviewer" + interlocutor_desc = "采访者" while True: - line = input("Enter Input: ") - if line == "end_convo": + line = input("请输入内容: ") + if line == "结束对话": break if int(run_gpt_generate_safety_score(persona, line)[0]) >= 8: - print (f"{persona.scratch.name} is a computational agent, and as such, it may be inappropriate to attribute human agency to the agent in your communication.") + print (f"{persona.scratch.name} 是一个计算代理,因此,在您的交流中将人类的能动性归因于该代理可能不恰当。") else: retrieved = new_retrieve(persona, [line], 50)[line] @@ -277,7 +273,7 @@ def open_convo_session(persona, convo_mode): elif convo_mode == "whisper": - whisper = input("Enter Input: ") + whisper = input("请输入耳语内容: ") thought = generate_inner_thought(persona, whisper) created = persona.scratch.curr_time diff --git a/reverie/backend_server/persona/cognitive_modules/execute.py b/reverie/backend_server/persona/cognitive_modules/execute.py index a8794ad7a3..3c377ee7a3 100644 --- a/reverie/backend_server/persona/cognitive_modules/execute.py +++ b/reverie/backend_server/persona/cognitive_modules/execute.py @@ -1,8 +1,8 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: execute.py -Description: This defines the "Act" module for generative agents. +文件: execute.py +描述: 此文件定义了生成式代理的“行动”模块。 """ import sys import random @@ -14,39 +14,37 @@ def execute(persona, maze, personas, plan): """ - Given a plan (action's string address), we execute the plan (actually - outputs the tile coordinate path and the next coordinate for the - persona). - - INPUT: - persona: Current instance. - maze: An instance of current . - personas: A dictionary of all personas in the world. - plan: This is a string address of the action we need to execute. - It comes in the form of "{world}:{sector}:{arena}:{game_objects}". - It is important that you access this without doing negative - indexing (e.g., [-1]) because the latter address elements may not be - present in some cases. - e.g., "dolores double studio:double studio:bedroom 1:bed" - - OUTPUT: - execution + 给定一个计划(动作的字符串地址),我们执行该计划(实际 + 输出角色的瓦片坐标路径和下一个坐标)。 + + 输入: + persona: 当前的 实例。 + maze: 当前 的实例。 + personas: 世界中所有角色的字典。 + plan: 这是我们需要执行的动作的字符串地址。 + 格式为 "{世界}:{区域}:{竞技场}:{游戏对象}"。 + 重要的是,访问此地址时不要使用负索引(例如 [-1]), + 因为在某些情况下,末尾的地址元素可能不存在。 + 例如:"dolores double studio:double studio:bedroom 1:bed" + + 输出: + 执行结果 (下一个瓦片坐标, 表情符号, 描述文字) """ if "" in plan and persona.scratch.planned_path == []: persona.scratch.act_path_set = False - # is set to True if the path is set for the current action. - # It is False otherwise, and means we need to construct a new path. + # 如果当前动作的路径已设置,则为 True。 + # 否则为 False,表示我们需要构建一条新路径。 if not persona.scratch.act_path_set: - # is a list of tile coordinates where the persona may go - # to execute the current action. The goal is to pick one of them. + # 是角色可能前往执行当前动作的瓦片坐标列表。 + # 目标是选择其中一个。 target_tiles = None - print ('aldhfoaf/????') - print (plan) + print ('aldhfoaf/????') # DEBUG + print (plan) # DEBUG if "" in plan: - # Executing persona-persona interaction. + # 执行角色间互动。 target_p_tile = (personas[plan.split("")[-1].strip()] .scratch.curr_tile) potential_path = path_finder(maze.collision_maze, @@ -70,40 +68,38 @@ def execute(persona, maze, personas, plan): target_tiles = [potential_path[int(len(potential_path)/2+1)]] elif "" in plan: - # Executing interaction where the persona has decided to wait before - # executing their action. + # 执行角色在执行其动作前决定等待的互动。 x = int(plan.split()[1]) y = int(plan.split()[2]) target_tiles = [[x, y]] elif "" in plan: - # Executing a random location action. + # 执行随机位置动作。 plan = ":".join(plan.split(":")[:-1]) target_tiles = maze.address_tiles[plan] target_tiles = random.sample(list(target_tiles), 1) else: - # This is our default execution. We simply take the persona to the - # location where the current action is taking place. - # Retrieve the target addresses. Again, plan is an action address in its - # string form. takes this and returns candidate - # coordinates. + # 这是我们的默认执行方式。我们简单地将角色带到 + # 当前动作发生的地点。 + # 检索目标地址。再次说明,plan 是字符串形式的动作地址。 + # 接收此地址并返回候选 + # 坐标。 if plan not in maze.address_tiles: - maze.address_tiles["Johnson Park:park:park garden"] #ERRORRRRRRR + maze.address_tiles["Johnson Park:park:park garden"] #错误ERRRRRRRR else: target_tiles = maze.address_tiles[plan] - # There are sometimes more than one tile returned from this (e.g., a tabe - # may stretch many coordinates). So, we sample a few here. And from that - # random sample, we will take the closest ones. + # 有时会返回多个瓦片(例如,一张桌子 + # 可能跨越多个坐标)。因此,我们在这里采样一些。然后从 + # 随机样本中,我们将选择最近的那些。 if len(target_tiles) < 4: target_tiles = random.sample(list(target_tiles), len(target_tiles)) else: target_tiles = random.sample(list(target_tiles), 4) - # If possible, we want personas to occupy different tiles when they are - # headed to the same location on the maze. It is ok if they end up on the - # same time, but we try to lower that probability. - # We take care of that overlap here. + # 如果可能,我们希望角色在前往迷宫中相同位置时占据不同的瓦片。 + # 他们最终在同一瓦片上也可以,但我们尝试降低这种可能性。 + # 我们在这里处理重叠问题。 persona_name_set = set(personas.keys()) new_target_tiles = [] for i in target_tiles: @@ -118,17 +114,17 @@ def execute(persona, maze, personas, plan): new_target_tiles = target_tiles target_tiles = new_target_tiles - # Now that we've identified the target tile, we find the shortest path to - # one of the target tiles. + # 既然我们已经确定了目标瓦片,我们就找到 + # 到其中一个目标瓦片的最短路径。 curr_tile = persona.scratch.curr_tile collision_maze = maze.collision_maze closest_target_tile = None path = None for i in target_tiles: - # path_finder takes a collision_mze and the curr_tile coordinate as - # an input, and returns a list of coordinate tuples that becomes the - # path. - # e.g., [(0, 1), (1, 1), (1, 2), (1, 3), (1, 4)...] + # path_finder 接收一个 collision_mze(碰撞迷宫)和 curr_tile 坐标作为 + # 输入,并返回一个坐标元组列表,该列表成为 + # 路径。 + # 例如:[(0, 1), (1, 1), (1, 2), (1, 3), (1, 4)...] curr_path = path_finder(maze.collision_maze, curr_tile, i, @@ -140,13 +136,13 @@ def execute(persona, maze, personas, plan): closest_target_tile = i path = curr_path - # Actually setting the and . We cut the - # first element in the planned_path because it includes the curr_tile. + # 实际设置 。我们删除了 + # planned_path 中的第一个元素,因为它包含了 curr_tile。 persona.scratch.planned_path = path[1:] persona.scratch.act_path_set = True - # Setting up the next immediate step. We stay at our curr_tile if there is - # no left, but otherwise, we go to the next tile in the path. + # 设置下一个即时步骤。如果没有剩余的 , + # 我们将停留在当前瓦片,否则,我们将移动到路径中的下一个瓦片。 ret = persona.scratch.curr_tile if persona.scratch.planned_path: ret = persona.scratch.planned_path[0] diff --git a/reverie/backend_server/persona/cognitive_modules/perceive.py b/reverie/backend_server/persona/cognitive_modules/perceive.py index 732d3755d0..11386f0c9b 100644 --- a/reverie/backend_server/persona/cognitive_modules/perceive.py +++ b/reverie/backend_server/persona/cognitive_modules/perceive.py @@ -1,8 +1,8 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: perceive.py -Description: This defines the "Perceive" module for generative agents. +文件: perceive.py +描述: 此文件定义了生成式代理的“感知”模块。 """ import sys sys.path.append('../../') @@ -13,7 +13,7 @@ from persona.prompt_template.run_gpt_prompt import * def generate_poig_score(persona, event_type, description): - if "is idle" in description: + if "处于空闲状态" in description: return 1 if event_type == "event": @@ -24,30 +24,27 @@ def generate_poig_score(persona, event_type, description): def perceive(persona, maze): """ - Perceives events around the persona and saves it to the memory, both events - and spaces. - - We first perceive the events nearby the persona, as determined by its - . If there are a lot of events happening within that radius, we - take the of the closest events. Finally, we check whether - any of them are new, as determined by . If they are new, then we - save those and return the instances for those events. - - INPUT: - persona: An instance of that represents the current persona. - maze: An instance of that represents the current maze in which the - persona is acting in. - OUTPUT: - ret_events: a list of that are perceived and new. + 感知角色周围发生的事件,并将事件和空间信息保存到记忆中。 + + 我们首先感知角色附近的事件,范围由其 (视觉半径) 决定。 + 如果该半径内发生大量事件,我们将选取 (注意力带宽) 数量的最近事件。 + 最后,我们根据 (记忆保留度) 检查是否有新事件。 + 如果是新事件,我们将保存这些事件并返回这些事件的 (概念节点) 实例。 + + 输入: + persona: 代表当前角色的 实例。 + maze: 代表角色当前所在迷宫的 实例。 + 输出: + ret_events: 一个 列表,包含感知到的新事件。 """ - # PERCEIVE SPACE - # We get the nearby tiles given our current tile and the persona's vision - # radius. + # 感知空间 + # 我们根据当前瓦片和角色的视觉 + # 半径获取附近的瓦片。 nearby_tiles = maze.get_nearby_tiles(persona.scratch.curr_tile, persona.scratch.vision_r) - # We then store the perceived space. Note that the s_mem of the persona is - # in the form of a tree constructed using dictionaries. + # 然后我们存储感知到的空间。注意角色的 s_mem (空间记忆) + # 是以使用字典构建的树的形式存在的。 for i in nearby_tiles: i = maze.access_tile(i) if i["world"]: @@ -67,62 +64,62 @@ def perceive(persona, maze): persona.s_mem.tree[i["world"]][i["sector"]][i["arena"]] += [ i["game_object"]] - # PERCEIVE EVENTS. - # We will perceive events that take place in the same arena as the - # persona's current arena. + # 感知事件。 + # 我们将感知与角色当前所在竞技场 + # 相同的竞技场中发生的事件。 curr_arena_path = maze.get_tile_path(persona.scratch.curr_tile, "arena") - # We do not perceive the same event twice (this can happen if an object is - # extended across multiple tiles). + # 我们不会重复感知同一个事件(如果一个物体 + # 跨越多个瓦片,可能会发生这种情况)。 percept_events_set = set() - # We will order our percept based on the distance, with the closest ones - # getting priorities. + # 我们将根据距离对感知进行排序,最近的 + # 优先处理。 percept_events_list = [] - # First, we put all events that are occuring in the nearby tiles into the - # percept_events_list + # 首先,我们将附近瓦片中发生的所有事件放入 + # percept_events_list (感知事件列表) for tile in nearby_tiles: tile_details = maze.access_tile(tile) if tile_details["events"]: if maze.get_tile_path(tile, "arena") == curr_arena_path: - # This calculates the distance between the persona's current tile, - # and the target tile. + # 这计算了角色的当前瓦片 + # 与目标瓦片之间的距离。 dist = math.dist([tile[0], tile[1]], [persona.scratch.curr_tile[0], persona.scratch.curr_tile[1]]) - # Add any relevant events to our temp set/list with the distant info. + # 将任何相关事件及其距离信息添加到我们的临时集合/列表中。 for event in tile_details["events"]: if event not in percept_events_set: percept_events_list += [[dist, event]] percept_events_set.add(event) - # We sort, and perceive only persona.scratch.att_bandwidth of the closest - # events. If the bandwidth is larger, then it means the persona can perceive - # more elements within a small area. + # 我们进行排序,并且只感知最近的 persona.scratch.att_bandwidth (注意力带宽) 个 + # 事件。如果带宽较大,则表示角色可以在 + # 较小区域内感知更多元素。 percept_events_list = sorted(percept_events_list, key=itemgetter(0)) perceived_events = [] for dist, event in percept_events_list[:persona.scratch.att_bandwidth]: perceived_events += [event] - # Storing events. - # is a list of instances from the persona's - # associative memory. + # 存储事件。 + # 是来自角色 + # 联想记忆的 (概念节点) 实例列表。 ret_events = [] for p_event in perceived_events: s, p, o, desc = p_event if not p: - # If the object is not present, then we default the event to "idle". + # 如果对象不存在,则我们将事件默认为 "idle" (空闲)。 p = "is" - o = "idle" - desc = "idle" - desc = f"{s.split(':')[-1]} is {desc}" + o = "空闲" + desc = "空闲" + desc = f"{s.split(':')[-1]} 是 {desc}" p_event = (s, p, o) - # We retrieve the latest persona.scratch.retention events. If there is - # something new that is happening (that is, p_event not in latest_events), - # then we add that event to the a_mem and return it. + # 我们检索最新的 persona.scratch.retention (记忆保留度) 个事件。如果有 + # 新的事件发生(即 p_event 不在 latest_events 中), + # 那么我们就将该事件添加到 a_mem (联想记忆) 并返回它。 latest_events = persona.a_mem.get_summarized_latest_events( persona.scratch.retention) if p_event not in latest_events: - # We start by managing keywords. + # 我们首先管理关键词。 keywords = set() sub = p_event[0] obj = p_event[2] @@ -132,7 +129,7 @@ def perceive(persona, maze): obj = p_event[2].split(":")[-1] keywords.update([sub, obj]) - # Get event embedding + # 获取事件嵌入 desc_embedding_in = desc if "(" in desc: desc_embedding_in = (desc_embedding_in.split("(")[1] @@ -144,13 +141,12 @@ def perceive(persona, maze): event_embedding = get_embedding(desc_embedding_in) event_embedding_pair = (desc_embedding_in, event_embedding) - # Get event poignancy. + # 获取事件重要性(poignancy)。 event_poignancy = generate_poig_score(persona, "event", desc_embedding_in) - # If we observe the persona's self chat, we include that in the memory - # of the persona here. + # 如果我们观察到角色的自言自语,我们在此将其包含在角色的记忆中。 chat_node_ids = [] if p_event[0] == f"{persona.name}" and p_event[1] == "chat with": curr_event = persona.scratch.act_event @@ -171,7 +167,7 @@ def perceive(persona, maze): persona.scratch.chat) chat_node_ids = [chat_node.node_id] - # Finally, we add the current event to the agent's memory. + # 最后,我们将当前事件添加到代理的记忆中。 ret_events += [persona.a_mem.add_event(persona.scratch.curr_time, None, s, p, o, desc, keywords, event_poignancy, event_embedding_pair, chat_node_ids)] diff --git a/reverie/backend_server/persona/cognitive_modules/plan.py b/reverie/backend_server/persona/cognitive_modules/plan.py index 901d243311..8680550cef 100644 --- a/reverie/backend_server/persona/cognitive_modules/plan.py +++ b/reverie/backend_server/persona/cognitive_modules/plan.py @@ -1,8 +1,8 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: plan.py -Description: This defines the "Plan" module for generative agents. +文件: plan.py +描述: 此文件定义了生成式代理的“计划”模块。 """ import datetime import math @@ -17,21 +17,20 @@ from persona.cognitive_modules.converse import * ############################################################################## -# CHAPTER 2: Generate +# 第二章: 生成 ############################################################################## def generate_wake_up_hour(persona): """ - Generates the time when the persona wakes up. This becomes an integral part - of our process for generating the persona's daily plan. + 生成角色醒来的时间。这成为我们生成角色每日计划过程中不可或缺的一部分。 - Persona state: identity stable set, lifestyle, first_name + 角色状态: 身份稳定集, 生活方式, 名字 - INPUT: - persona: The Persona class instance - OUTPUT: - an integer signifying the persona's wake up hour - EXAMPLE OUTPUT: + 输入: + persona: Persona 类实例 + 输出: + 一个表示角色醒来小时的整数 + 输出示例: 8 """ if debug: print ("GNS FUNCTION: ") @@ -40,22 +39,21 @@ def generate_wake_up_hour(persona): def generate_first_daily_plan(persona, wake_up_hour): """ - Generates the daily plan for the persona. - Basically the long term planning that spans a day. Returns a list of actions - that the persona will take today. Usually comes in the following form: + 为角色生成每日计划。 + 基本上是跨越一天的长期规划。返回角色今天将要执行的动作列表。 + 通常格式如下: 'wake up and complete the morning routine at 6:00 am', - 'eat breakfast at 7:00 am',.. - Note that the actions come without a period. - - Persona state: identity stable set, lifestyle, cur_data_str, first_name - - INPUT: - persona: The Persona class instance - wake_up_hour: an integer that indicates when the hour the persona wakes up - (e.g., 8) - OUTPUT: - a list of daily actions in broad strokes. - EXAMPLE OUTPUT: + 'eat breakfast at 7:00 am',... + 注意动作描述末尾不带句号。 + + 角色状态: 身份稳定集, 生活方式, cur_data_str (当前日期字符串), 名字 + + 输入: + persona: Persona 类实例 + wake_up_hour: 一个整数,表示角色醒来的小时 (例如:8) + 输出: + 一个大致的每日行动列表。 + 输出示例: ['wake up and complete the morning routine at 6:00 am', 'have breakfast and brush teeth at 6:30 am', 'work on painting project from 8:00 am to 12:00 pm', @@ -70,22 +68,22 @@ def generate_first_daily_plan(persona, wake_up_hour): def generate_hourly_schedule(persona, wake_up_hour): """ - Based on the daily req, creates an hourly schedule -- one hour at a time. - The form of the action for each of the hour is something like below: - "sleeping in her bed" + 根据每日需求,创建每小时的日程安排——一次一小时。 + 每小时的动作形式如下所示: + "sleeping in her bed" (在她的床上睡觉) - The output is basically meant to finish the phrase, "x is..." + 输出基本上是为了完成短语 "x is..." (x 正在...) - Persona state: identity stable set, daily_plan + 角色状态: 身份稳定集, daily_plan (每日计划) - INPUT: - persona: The Persona class instance - persona: Integer form of the wake up hour for the persona. - OUTPUT: - a list of activities and their duration in minutes: - EXAMPLE OUTPUT: + 输入: + persona: Persona 类实例 + wake_up_hour: 角色的整数形式的起床小时。 + 输出: + 一个包含活动及其持续时间(分钟)的列表: + 输出示例: [['sleeping', 360], ['waking up and starting her morning routine', 60], - ['eating breakfast', 60],.. + ['eating breakfast', 60],..] """ if debug: print ("GNS FUNCTION: ") @@ -108,8 +106,8 @@ def generate_hourly_schedule(persona, wake_up_hour): n_m1_activity += [run_gpt_prompt_generate_hourly_schedule( persona, curr_hour_str, n_m1_activity, hour_str)[0]] - # Step 1. Compressing the hourly schedule to the following format: - # The integer indicates the number of hours. They should add up to 24. + # 步骤 1. 将每小时的日程压缩成以下格式: + # 整数表示小时数。它们加起来应该等于 24。 # [['sleeping', 6], ['waking up and starting her morning routine', 1], # ['eating breakfast', 1], ['getting ready for the day', 1], # ['working on her painting', 2], ['taking a break', 1], @@ -128,9 +126,9 @@ def generate_hourly_schedule(persona, wake_up_hour): if _n_m1_hourly_compressed: _n_m1_hourly_compressed[-1][1] += 1 - # Step 2. Expand to min scale (from hour scale) + # 步骤 2. 扩展到分钟级别 (从小时级别) # [['sleeping', 360], ['waking up and starting her morning routine', 60], - # ['eating breakfast', 60],.. + # ['eating breakfast', 60],..] n_m1_hourly_compressed = [] for task, duration in _n_m1_hourly_compressed: n_m1_hourly_compressed += [[task, duration*60]] @@ -140,60 +138,57 @@ def generate_hourly_schedule(persona, wake_up_hour): def generate_task_decomp(persona, task, duration): """ - A few shot decomposition of a task given the task description - - Persona state: identity stable set, curr_date_str, first_name - - INPUT: - persona: The Persona class instance - task: the description of the task at hand in str form - (e.g., "waking up and starting her morning routine") - duration: an integer that indicates the number of minutes this task is - meant to last (e.g., 60) - OUTPUT: - a list of list where the inner list contains the decomposed task - description and the number of minutes the task is supposed to last. - EXAMPLE OUTPUT: + 根据任务描述对任务进行少样本分解。 + + 角色状态: 身份稳定集, curr_date_str (当前日期字符串), 名字 + + 输入: + persona: Persona 类实例 + task: 字符串形式的当前任务描述 + (例如:"waking up and starting her morning routine" - 醒来并开始她的晨间事务) + duration: 一个整数,表示此任务预计持续的分钟数 (例如:60) + 输出: + 一个列表的列表,其中内部列表包含分解后的任务描述和任务预计持续的分钟数。 + 输出示例: [['going to the bathroom', 5], ['getting dressed', 5], ['eating breakfast', 15], ['checking her email', 5], ['getting her supplies ready for the day', 15], ['starting to work on her painting', 15]] - """ if debug: print ("GNS FUNCTION: ") return run_gpt_prompt_task_decomp(persona, task, duration)[0] def generate_action_sector(act_desp, persona, maze): - """TODO - Given the persona and the task description, choose the action_sector. - - Persona state: identity stable set, n-1 day schedule, daily plan - - INPUT: - act_desp: description of the new action (e.g., "sleeping") - persona: The Persona class instance - OUTPUT: - action_arena (e.g., "bedroom 2") - EXAMPLE OUTPUT: - "bedroom 2" + """待办 + 根据角色和任务描述,选择 action_sector (行动区域)。 + + 角色状态: 身份稳定集, 前一天的日程, 每日计划 + + 输入: + act_desp: 新动作的描述 (例如:"sleeping" - 睡觉) + persona: Persona 类实例 + 输出: + action_sector (例如:"bedroom 2" - 卧室2) (注意:示例输出中给出的是 arena,但函数名是 sector) + 输出示例: + "bedroom 2" """ if debug: print ("GNS FUNCTION: ") return run_gpt_prompt_action_sector(act_desp, persona, maze)[0] def generate_action_arena(act_desp, persona, maze, act_world, act_sector): - """TODO - Given the persona and the task description, choose the action_arena. + """待办 + 根据角色和任务描述,选择 action_arena (行动竞技场)。 - Persona state: identity stable set, n-1 day schedule, daily plan + 角色状态: 身份稳定集, 前一天的日程, 每日计划 - INPUT: - act_desp: description of the new action (e.g., "sleeping") - persona: The Persona class instance - OUTPUT: - action_arena (e.g., "bedroom 2") - EXAMPLE OUTPUT: + 输入: + act_desp: 新动作的描述 (例如:"sleeping" - 睡觉) + persona: Persona 类实例 + 输出: + action_arena (例如:"bedroom 2" - 卧室2) + 输出示例: "bedroom 2" """ if debug: print ("GNS FUNCTION: ") @@ -201,20 +196,19 @@ def generate_action_arena(act_desp, persona, maze, act_world, act_sector): def generate_action_game_object(act_desp, act_address, persona, maze): - """TODO - Given the action description and the act address (the address where - we expect the action to task place), choose one of the game objects. - - Persona state: identity stable set, n-1 day schedule, daily plan - - INPUT: - act_desp: the description of the action (e.g., "sleeping") - act_address: the arena where the action will take place: - (e.g., "dolores double studio:double studio:bedroom 2") - persona: The Persona class instance - OUTPUT: - act_game_object: - EXAMPLE OUTPUT: + """待办 + 根据动作描述和动作地址(我们期望动作发生的地址),选择一个游戏对象。 + + 角色状态: 身份稳定集, 前一天的日程, 每日计划 + + 输入: + act_desp: 动作的描述 (例如:"sleeping" - 睡觉) + act_address: 动作将发生的竞技场: + (例如:"dolores double studio:double studio:bedroom 2") + persona: Persona 类实例 + 输出: + act_game_object (行动游戏对象) + 输出示例: "bed" """ if debug: print ("GNS FUNCTION: ") @@ -224,18 +218,17 @@ def generate_action_game_object(act_desp, act_address, persona, maze): def generate_action_pronunciatio(act_desp, persona): - """TODO - Given an action description, creates an emoji string description via a few - shot prompt. - - Does not really need any information from persona. - - INPUT: - act_desp: the description of the action (e.g., "sleeping") - persona: The Persona class instance - OUTPUT: - a string of emoji that translates action description. - EXAMPLE OUTPUT: + """待办 + 给定一个动作描述,通过少样本提示创建一个表情符号字符串描述。 + + 基本不需要来自角色的任何信息。 + + 输入: + act_desp: 动作的描述 (例如:"sleeping" - 睡觉) + persona: Persona 类实例 + 输出: + 一个用于翻译动作描述的表情符号字符串。 + 输出示例: "🧈🍞" """ if debug: print ("GNS FUNCTION: ") @@ -250,14 +243,14 @@ def generate_action_pronunciatio(act_desp, persona): def generate_action_event_triple(act_desp, persona): - """TODO - - INPUT: - act_desp: the description of the action (e.g., "sleeping") - persona: The Persona class instance - OUTPUT: - a string of emoji that translates action description. - EXAMPLE OUTPUT: + """待办 + + 输入: + act_desp: 动作的描述 (例如:"sleeping" - 睡觉) + persona: Persona 类实例 + 输出: + 一个用于翻译动作描述的表情符号字符串。 (注意:此描述似乎与pronunciatio重复,事件三元组应为SPO) + 输出示例: "🧈🍞" """ if debug: print ("GNS FUNCTION: ") @@ -314,43 +307,43 @@ def generate_decide_to_react(init_persona, target_persona, retrieved): def generate_new_decomp_schedule(persona, inserted_act, inserted_act_dur, start_hour, end_hour): - # Step 1: Setting up the core variables for the function. - #

is the persona whose schedule we are editing right now. + # 步骤 1: 设置函数的核心变量。 + #

是我们当前正在编辑其日程的角色。 p = persona - # indicates the number of minutes that have passed today. + # 表示今天已经过去的分钟数。 today_min_pass = (int(p.scratch.curr_time.hour) * 60 + int(p.scratch.curr_time.minute) + 1) - # Step 2: We need to create and . - # These are basically a sub-component of of the persona, - # but focusing on the current decomposition. - # Here is an example for : - # ['wakes up and completes her morning routine (wakes up at 6am)', 5] - # ['wakes up and completes her morning routine (wakes up at 6am)', 5] - # ['wakes up and completes her morning routine (uses the restroom)', 5] - # ['wakes up and completes her morning routine (washes her ...)', 10] - # ['wakes up and completes her morning routine (makes her bed)', 5] - # ['wakes up and completes her morning routine (eats breakfast)', 15] - # ['wakes up and completes her morning routine (gets dressed)', 10] - # ['wakes up and completes her morning routine (leaves her ...)', 5] - # ['wakes up and completes her morning routine (starts her ...)', 5] - # ['preparing for her day (waking up at 6am)', 5] - # ['preparing for her day (making her bed)', 5] - # ['preparing for her day (taking a shower)', 15] - # ['preparing for her day (getting dressed)', 5] - # ['preparing for her day (eating breakfast)', 10] - # ['preparing for her day (brushing her teeth)', 5] - # ['preparing for her day (making coffee)', 5] - # ['preparing for her day (checking her email)', 5] - # ['preparing for her day (starting to work on her painting)', 5] + # 步骤 2: 我们需要创建 。 + # 这些基本上是角色 的子组件, + # 但侧重于当前的分解。 + # 这是 的一个例子: + # ['醒来并完成她的晨间事务 (早上6点醒来)', 5] + # ['醒来并完成她的晨间事务 (早上6点醒来)', 5] + # ['醒来并完成她的晨间事务 (上厕所)', 5] + # ['醒来并完成她的晨间事务 (洗漱...)', 10] + # ['醒来并完成她的晨间事务 (整理床铺)', 5] + # ['醒来并完成她的晨间事务 (吃早餐)', 15] + # ['醒来并完成她的晨间事务 (穿衣服)', 10] + # ['醒来并完成她的晨间事务 (离开她的...)', 5] + # ['醒来并完成她的晨间事务 (开始她的...)', 5] + # ['为她的一天做准备 (早上6点醒来)', 5] + # ['为她的一天做准备 (整理床铺)', 5] + # ['为她的一天做准备 (洗澡)', 15] + # ['为她的一天做准备 (穿衣服)', 5] + # ['为她的一天做准备 (吃早餐)', 10] + # ['为她的一天做准备 (刷牙)', 5] + # ['为她的一天做准备 (煮咖啡)', 5] + # ['为她的一天做准备 (查邮件)', 5] + # ['为她的一天做准备 (开始画画)', 5] # - # And concerns only until where an event happens. - # ['wakes up and completes her morning routine (wakes up at 6am)', 5] - # ['wakes up and completes her morning routine (wakes up at 6am)', 2] + # 而 只关系到事件发生之前的部分。 + # ['醒来并完成她的晨间事务 (早上6点醒来)', 5] + # ['醒来并完成她的晨间事务 (早上6点醒来)', 2] main_act_dur = [] truncated_act_dur = [] - dur_sum = 0 # duration sum - count = 0 # enumerate count + dur_sum = 0 # duration sum / 持续时间总和 + count = 0 # enumerate count / 枚举计数 truncated_fin = False print ("DEBUG::: ", persona.scratch.name) @@ -360,15 +353,15 @@ def generate_new_decomp_schedule(persona, inserted_act, inserted_act_dur, start if dur_sum <= today_min_pass: truncated_act_dur += [[act, dur]] elif dur_sum > today_min_pass and not truncated_fin: - # We need to insert that last act, duration list like this one: - # e.g., ['wakes up and completes her morning routine (wakes up...)', 2] + # 我们需要像这样插入最后一个动作和持续时间列表: + # 例如: ['醒来并完成她的晨间事务 (醒来...)', 2] truncated_act_dur += [[p.scratch.f_daily_schedule[count][0], dur_sum - today_min_pass]] - truncated_act_dur[-1][-1] -= (dur_sum - today_min_pass) ######## DEC 7 DEBUG;.. is the +1 the right thing to do??? - # truncated_act_dur[-1][-1] -= (dur_sum - today_min_pass + 1) ######## DEC 7 DEBUG;.. is the +1 the right thing to do??? + truncated_act_dur[-1][-1] -= (dur_sum - today_min_pass) ######## 12月7日调试;.. +1是否正确??? + # truncated_act_dur[-1][-1] -= (dur_sum - today_min_pass + 1) ######## 12月7日调试;.. +1是否正确??? print ("DEBUG::: ", truncated_act_dur) - # truncated_act_dur[-1][-1] -= (dur_sum - today_min_pass) ######## DEC 7 DEBUG;.. is the +1 the right thing to do??? + # truncated_act_dur[-1][-1] -= (dur_sum - today_min_pass) ######## 12月7日调试;.. +1是否正确??? truncated_fin = True dur_sum += dur count += 1 @@ -376,15 +369,15 @@ def generate_new_decomp_schedule(persona, inserted_act, inserted_act_dur, start persona_name = persona.name main_act_dur = main_act_dur - x = truncated_act_dur[-1][0].split("(")[0].strip() + " (on the way to " + truncated_act_dur[-1][0].split("(")[-1][:-1] + ")" + x = truncated_act_dur[-1][0].split("(")[0].strip() + " (在去 " + truncated_act_dur[-1][0].split("(")[-1][:-1] + " 的路上)" truncated_act_dur[-1][0] = x if "(" in truncated_act_dur[-1][0]: inserted_act = truncated_act_dur[-1][0].split("(")[0].strip() + " (" + inserted_act + ")" - # To do inserted_act_dur+1 below is an important decision but I'm not sure - # if I understand the full extent of its implications. Might want to - # revisit. + # 下面 inserted_act_dur+1 的处理是一个重要的决定,但我不确定 + # 我是否完全理解其含义。可能需要 + # 重新审视。 truncated_act_dur += [[inserted_act, inserted_act_dur]] start_time_hour = (datetime.datetime(2022, 10, 31, 0, 0) + datetime.timedelta(hours=start_hour)) @@ -402,43 +395,43 @@ def generate_new_decomp_schedule(persona, inserted_act, inserted_act_dur, start ############################################################################## -# CHAPTER 3: Plan +# 第三章: 计划 ############################################################################## def revise_identity(persona): p_name = persona.scratch.name - focal_points = [f"{p_name}'s plan for {persona.scratch.get_str_curr_date_str()}.", - f"Important recent events for {p_name}'s life."] + focal_points = [f"{p_name} 在 {persona.scratch.get_str_curr_date_str()} 的计划。", + f"{p_name} 生活中最近的重要事件。"] retrieved = new_retrieve(persona, focal_points) - statements = "[Statements]\n" + statements = "[陈述]\n" # Kept brackets as they might be structural for prompt for key, val in retrieved.items(): for i in val: statements += f"{i.created.strftime('%A %B %d -- %H:%M %p')}: {i.embedding_key}\n" # print (";adjhfno;asdjao;idfjo;af", p_name) plan_prompt = statements + "\n" - plan_prompt += f"Given the statements above, is there anything that {p_name} should remember as they plan for" - plan_prompt += f" *{persona.scratch.curr_time.strftime('%A %B %d')}*? " - plan_prompt += f"If there is any scheduling information, be as specific as possible (include date, time, and location if stated in the statement)\n\n" - plan_prompt += f"Write the response from {p_name}'s perspective." + plan_prompt += f"根据以上陈述,{p_name} 在计划" + plan_prompt += f" *{persona.scratch.curr_time.strftime('%A %B %d')}* 时,有什么需要记住的吗?" + plan_prompt += f"如果有任何日程安排信息,请尽可能具体(如果陈述中提及,请包括日期、时间和地点)\n\n" + plan_prompt += f"从 {p_name} 的视角撰写回应。" plan_note = ChatGPT_single_request(plan_prompt) # print (plan_note) thought_prompt = statements + "\n" - thought_prompt += f"Given the statements above, how might we summarize {p_name}'s feelings about their days up to now?\n\n" - thought_prompt += f"Write the response from {p_name}'s perspective." + thought_prompt += f"根据以上陈述,我们应如何总结 {p_name} 迄今为止对日子的感受?\n\n" + thought_prompt += f"从 {p_name} 的视角撰写回应。" thought_note = ChatGPT_single_request(thought_prompt) # print (thought_note) - currently_prompt = f"{p_name}'s status from {(persona.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}:\n" + currently_prompt = f"{p_name} 从 {(persona.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')} 的状态:\n" currently_prompt += f"{persona.scratch.currently}\n\n" - currently_prompt += f"{p_name}'s thoughts at the end of {(persona.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}:\n" + currently_prompt += f"{p_name} 在 {(persona.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')} 结束时的想法:\n" currently_prompt += (plan_note + thought_note).replace('\n', '') + "\n\n" - currently_prompt += f"It is now {persona.scratch.curr_time.strftime('%A %B %d')}. Given the above, write {p_name}'s status for {persona.scratch.curr_time.strftime('%A %B %d')} that reflects {p_name}'s thoughts at the end of {(persona.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}. Write this in third-person talking about {p_name}." - currently_prompt += f"If there is any scheduling information, be as specific as possible (include date, time, and location if stated in the statement).\n\n" - currently_prompt += "Follow this format below:\nStatus: " + currently_prompt += f"现在是 {persona.scratch.curr_time.strftime('%A %B %d')}。根据上述信息,撰写 {p_name} 在 {persona.scratch.curr_time.strftime('%A %B %d')} 的状态,以反映 {p_name} 在 {(persona.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')} 结束时的想法。请以第三人称撰写关于 {p_name} 的内容。" + currently_prompt += f"如果有任何日程安排信息,请尽可能具体(如果陈述中提及,请包括日期、时间和地点)\n\n" + currently_prompt += "请遵循以下格式:\nStatus: <新状态>" # "Status:" is a key # print ("DEBUG ;adjhfno;asdjao;asdfsidfjo;af", p_name) # print (currently_prompt) new_currently = ChatGPT_single_request(currently_prompt) @@ -448,9 +441,9 @@ def revise_identity(persona): persona.scratch.currently = new_currently daily_req_prompt = persona.scratch.get_str_iss() + "\n" - daily_req_prompt += f"Today is {persona.scratch.curr_time.strftime('%A %B %d')}. Here is {persona.scratch.name}'s plan today in broad-strokes (with the time of the day. e.g., have a lunch at 12:00 pm, watch TV from 7 to 8 pm).\n\n" - daily_req_prompt += f"Follow this format (the list should have 4~6 items but no more):\n" - daily_req_prompt += f"1. wake up and complete the morning routine at

is literally the string address of where the action is taking - # place. It comes in the form of - # "{world}:{sector}:{arena}:{game_objects}". It is important that you - # access this without doing negative indexing (e.g., [-1]) because the - # latter address elements may not be present in some cases. + # CURR ACTION (当前动作) + #
是动作发生地点的字符串地址。它的格式是 + # "{世界}:{区域}:{竞技场}:{游戏对象}"。重要的是,访问此地址时不要使用负索引(例如 [-1]), + # 因为在某些情况下,末尾的地址元素可能不存在。 # e.g., "dolores double studio:double studio:bedroom 1:bed" self.act_address = None - # is a python datetime instance that indicates when the - # action has started. + # 是一个 python datetime 实例,指示动作开始的时间。 self.act_start_time = None - # is the integer value that indicates the number of minutes an - # action is meant to last. + # 是一个整数值,指示一个动作预计持续的分钟数。 self.act_duration = None - # is a string description of the action. + # 是动作的字符串描述。 self.act_description = None - # is the descriptive expression of the self.description. - # Currently, it is implemented as emojis. + # 是 self.description 的描述性表达。 + # 目前,它是以表情符号实现的。 self.act_pronunciatio = None - # represents the event triple that the persona is currently - # engaged in. + # 表示角色当前参与的事件三元组。 self.act_event = (self.name, None, None) - # is a string description of the object action. + # 是对象动作的字符串描述。 self.act_obj_description = None - # is the descriptive expression of the object action. - # Currently, it is implemented as emojis. + # 是对象动作的描述性表达。 + # 目前,它是以表情符号实现的。 self.act_obj_pronunciatio = None - # represents the event triple that the action object is - # currently engaged in. + # 表示动作对象当前参与的事件三元组。 self.act_obj_event = (self.name, None, None) - # is the string name of the persona that the current - # persona is chatting with. None if it does not exist. + # 是当前角色正在与之聊天的角色的字符串名称。如果不存在则为 None。 self.chatting_with = None - # is a list of list that saves a conversation between two personas. - # It comes in the form of: [["Dolores Murphy", "Hi"], + # 是一个列表的列表,用于保存两个角色之间的对话。 + # 它的格式是:[["Dolores Murphy", "Hi"], # ["Maeve Jenson", "Hi"] ...] self.chat = None # @@ -147,19 +129,16 @@ def __init__(self, f_saved): self.chatting_with_buffer = dict() self.chatting_end_time = None - # is True if we've already calculated the path the persona will - # take to execute this action. That path is stored in the persona's - # scratch.planned_path. + # 如果我们已经计算了角色执行此动作将要采用的路径,则为 True。 + # 该路径存储在角色的 scratch.planned_path 中。 self.act_path_set = False - # is a list of x y coordinate tuples (tiles) that describe - # the path the persona is to take to execute the . - # The list does not include the persona's current tile, and includes the - # destination tile. + # 是一个 x y 坐标元组(瓦片)的列表,描述了角色执行 (当前动作) 将要采用的路径。 + # 该列表不包括角色的当前瓦片,但包括目标瓦片。 # e.g., [(50, 10), (49, 10), (48, 10), ...] self.planned_path = [] if check_if_file_exists(f_saved): - # If we have a bootstrap file, load that here. + # 如果我们有引导文件,在此处加载。 scratch_load = json.load(open(f_saved)) self.vision_r = scratch_load["vision_r"] @@ -236,12 +215,12 @@ def __init__(self, f_saved): def save(self, out_json): """ - Save persona's scratch. + 保存角色的暂存信息。 - INPUT: - out_json: The file where we wil be saving our persona's state. - OUTPUT: - None + 输入: + out_json: 我们将保存角色状态的文件。 + 输出: + 无 """ scratch = dict() scratch["vision_r"] = self.vision_r @@ -312,22 +291,21 @@ def save(self, out_json): def get_f_daily_schedule_index(self, advance=0): """ - We get the current index of self.f_daily_schedule. - - Recall that self.f_daily_schedule stores the decomposed action sequences - up until now, and the hourly sequences of the future action for the rest - of today. Given that self.f_daily_schedule is a list of list where the - inner list is composed of [task, duration], we continue to add up the - duration until we reach "if elapsed > today_min_elapsed" condition. The - index where we stop is the index we will return. - - INPUT - advance: Integer value of the number minutes we want to look into the - future. This allows us to get the index of a future timeframe. - OUTPUT - an integer value for the current index of f_daily_schedule. + 获取 self.f_daily_schedule 的当前索引。 + + 回想一下,self.f_daily_schedule 存储了到目前为止已分解的动作序列, + 以及当天剩余未来动作的每小时序列。 + 鉴于 self.f_daily_schedule 是一个列表的列表,其中内部列表由 [任务, 持续时间] 组成, + 我们继续累加持续时间,直到达到 "if elapsed > today_min_elapsed" (如果已过时间 > 今天已过分钟数) 条件。 + 我们停止处的索引就是要返回的索引。 + + 输入 + advance: 我们希望展望未来的分钟数(整数值)。 + 这使我们能够获取未来时间范围的索引。 + 输出 + f_daily_schedule 当前索引的整数值。 """ - # We first calculate teh number of minutes elapsed today. + # 我们首先计算今天已经过去的分钟数。 today_min_elapsed = 0 today_min_elapsed += self.curr_time.hour * 60 today_min_elapsed += self.curr_time.minute @@ -340,7 +318,7 @@ def get_f_daily_schedule_index(self, advance=0): for task, duration in self.f_daily_schedule_hourly_org: x += duration - # We then calculate the current index based on that. + # 然后我们据此计算当前索引。 curr_index = 0 elapsed = 0 for task, duration in self.f_daily_schedule: @@ -354,21 +332,21 @@ def get_f_daily_schedule_index(self, advance=0): def get_f_daily_schedule_hourly_org_index(self, advance=0): """ - We get the current index of self.f_daily_schedule_hourly_org. - It is otherwise the same as get_f_daily_schedule_index. - - INPUT - advance: Integer value of the number minutes we want to look into the - future. This allows us to get the index of a future timeframe. - OUTPUT - an integer value for the current index of f_daily_schedule. + 获取 self.f_daily_schedule_hourly_org 的当前索引。 + 除此以外,与 get_f_daily_schedule_index 相同。 + + 输入 + advance: 我们希望展望未来的分钟数(整数值)。 + 这使我们能够获取未来时间范围的索引。 + 输出 + f_daily_schedule_hourly_org 当前索引的整数值。(应为 f_daily_schedule_hourly_org) """ - # We first calculate teh number of minutes elapsed today. + # 我们首先计算今天已经过去的分钟数。 today_min_elapsed = 0 today_min_elapsed += self.curr_time.hour * 60 today_min_elapsed += self.curr_time.minute today_min_elapsed += advance - # We then calculate the current index based on that. + # 然后我们据此计算当前索引。 curr_index = 0 elapsed = 0 for task, duration in self.f_daily_schedule_hourly_org: @@ -381,36 +359,32 @@ def get_f_daily_schedule_hourly_org_index(self, advance=0): def get_str_iss(self): """ - ISS stands for "identity stable set." This describes the commonset summary - of this persona -- basically, the bare minimum description of the persona - that gets used in almost all prompts that need to call on the persona. - - INPUT - None - OUTPUT - the identity stable set summary of the persona in a string form. - EXAMPLE STR OUTPUT - "Name: Dolores Heitmiller - Age: 28 - Innate traits: hard-edged, independent, loyal - Learned traits: Dolores is a painter who wants live quietly and paint - while enjoying her everyday life. - Currently: Dolores is preparing for her first solo show. She mostly - works from home. - Lifestyle: Dolores goes to bed around 11pm, sleeps for 7 hours, eats - dinner around 6pm. - Daily plan requirement: Dolores is planning to stay at home all day and - never go out." + ISS 代表“身份稳定集”。这描述了此角色的共同集摘要 + ——基本上,这是在几乎所有需要调用角色的提示中都会使用的 + 角色的最基本描述。 + + 输入 + 无 + 输出 + 字符串形式的角色的身份稳定集摘要。 + 输出字符串示例 + "姓名: 多洛莉丝·海特米勒 + 年龄: 28 + 先天特质: 棱角分明, 独立, 忠诚 + 习得特质: 多洛莉丝是一位画家,她想安静地生活和画画,同时享受她的日常生活。 + 当前: 多洛莉丝正在为她的首次个展做准备。她大部分时间在家工作。 + 生活方式: 多洛莉丝大约晚上11点睡觉,睡7个小时,下午6点左右吃晚饭。 + 每日计划需求: 多洛莉丝计划整天呆在家里,从不外出。" """ commonset = "" - commonset += f"Name: {self.name}\n" - commonset += f"Age: {self.age}\n" - commonset += f"Innate traits: {self.innate}\n" - commonset += f"Learned traits: {self.learned}\n" - commonset += f"Currently: {self.currently}\n" - commonset += f"Lifestyle: {self.lifestyle}\n" - commonset += f"Daily plan requirement: {self.daily_plan_req}\n" - commonset += f"Current Date: {self.curr_time.strftime('%A %B %d')}\n" + commonset += f"姓名: {self.name}\n" # Name + commonset += f"年龄: {self.age}\n" # Age + commonset += f"先天特质: {self.innate}\n" # Innate traits + commonset += f"习得特质: {self.learned}\n" # Learned traits + commonset += f"当前: {self.currently}\n" # Currently + commonset += f"生活方式: {self.lifestyle}\n" # Lifestyle + commonset += f"每日计划需求: {self.daily_plan_req}\n" # Daily plan requirement + commonset += f"当前日期: {self.curr_time.strftime('%A %B %d')}\n" # Current Date return commonset @@ -518,13 +492,13 @@ def add_new_action(self, def act_time_str(self): """ - Returns a string output of the current time. + 返回当前时间的字符串输出。 - INPUT - None - OUTPUT - A string output of the current time. - EXAMPLE STR OUTPUT + 输入 + 无 + 输出 + 当前时间的字符串输出。 + 输出字符串示例 "14:05 P.M." """ return self.act_start_time.strftime("%H:%M %p") @@ -532,14 +506,14 @@ def act_time_str(self): def act_check_finished(self): """ - Checks whether the self.Action instance has finished. - - INPUT - curr_datetime: Current time. If current time is later than the action's - start time + its duration, then the action has finished. - OUTPUT - Boolean [True]: Action has finished. - Boolean [False]: Action has not finished and is still ongoing. + 检查 self.Action 实例是否已完成。 + + 输入 + curr_datetime: 当前时间。如果当前时间晚于动作的开始时间 + 持续时间, + 则动作已完成。 + 输出 + 布尔值 [True]: 动作已完成。 + 布尔值 [False]: 动作尚未完成,仍在进行中。 """ if not self.act_address: return True @@ -560,12 +534,12 @@ def act_check_finished(self): def act_summarize(self): """ - Summarize the current action as a dictionary. + 将当前动作总结为字典。 - INPUT - None - OUTPUT - ret: A human readable summary of the action. + 输入 + 无 + 输出 + ret: 动作的人类可读摘要。 """ exp = dict() exp["persona"] = self.name @@ -579,19 +553,18 @@ def act_summarize(self): def act_summary_str(self): """ - Returns a string summary of the current action. Meant to be - human-readable. + 返回当前动作的字符串摘要。旨在供人类阅读。 - INPUT - None - OUTPUT - ret: A human readable summary of the action. + 输入 + 无 + 输出 + ret: 动作的人类可读摘要。 """ start_datetime_str = self.act_start_time.strftime("%A %B %d -- %H:%M %p") ret = f"[{start_datetime_str}]\n" - ret += f"Activity: {self.name} is {self.act_description}\n" - ret += f"Address: {self.act_address}\n" - ret += f"Duration in minutes (e.g., x min): {str(self.act_duration)} min\n" + ret += f"活动: {self.name} 正在 {self.act_description}\n" + ret += f"地址: {self.act_address}\n" + ret += f"持续时间(分钟) (例如 x 分钟): {str(self.act_duration)} 分钟\n" return ret diff --git a/reverie/backend_server/persona/memory_structures/spatial_memory.py b/reverie/backend_server/persona/memory_structures/spatial_memory.py index 1fe510b3f4..f3995120d7 100644 --- a/reverie/backend_server/persona/memory_structures/spatial_memory.py +++ b/reverie/backend_server/persona/memory_structures/spatial_memory.py @@ -1,9 +1,9 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: spatial_memory.py -Description: Defines the MemoryTree class that serves as the agents' spatial -memory that aids in grounding their behavior in the game world. +文件: spatial_memory.py +描述: 定义了 MemoryTree 类,作为代理的空间记忆, +辅助将其行为锚定在游戏世界中。 """ import json import sys @@ -43,17 +43,16 @@ def save(self, out_json): def get_str_accessible_sectors(self, curr_world): """ - Returns a summary string of all the arenas that the persona can access - within the current sector. + 返回一个摘要字符串,包含角色在当前世界(world)中可以访问的所有区域(sector)。 - Note that there are places a given persona cannot enter. This information - is provided in the persona sheet. We account for this in this function. + 请注意,某些地方特定角色无法进入。此信息在角色表中提供。 + 我们在此函数中考虑了这一点。 - INPUT - None - OUTPUT - A summary string of all the arenas that the persona can access. - EXAMPLE STR OUTPUT + 输入 + curr_world: 当前世界名称 + 输出 + 一个摘要字符串,包含角色可以访问的所有区域。 + 输出字符串示例 "bedroom, kitchen, dining room, office, bathroom" """ x = ", ".join(list(self.tree[curr_world].keys())) @@ -62,17 +61,16 @@ def get_str_accessible_sectors(self, curr_world): def get_str_accessible_sector_arenas(self, sector): """ - Returns a summary string of all the arenas that the persona can access - within the current sector. + 返回一个摘要字符串,包含角色在当前区域(sector)中可以访问的所有竞技场(arena)。 - Note that there are places a given persona cannot enter. This information - is provided in the persona sheet. We account for this in this function. + 请注意,某些地方特定角色无法进入。此信息在角色表中提供。 + 我们在此函数中考虑了这一点。 - INPUT - None - OUTPUT - A summary string of all the arenas that the persona can access. - EXAMPLE STR OUTPUT + 输入 + sector: 当前区域的字符串表示 (例如 "world_name:sector_name") + 输出 + 一个摘要字符串,包含角色可以访问的所有竞技场。 + 输出字符串示例 "bedroom, kitchen, dining room, office, bathroom" """ curr_world, curr_sector = sector.split(":") @@ -84,16 +82,15 @@ def get_str_accessible_sector_arenas(self, sector): def get_str_accessible_arena_game_objects(self, arena): """ - Get a str list of all accessible game objects that are in the arena. If - temp_address is specified, we return the objects that are available in - that arena, and if not, we return the objects that are in the arena our - persona is currently in. - - INPUT - temp_address: optional arena address - OUTPUT - str list of all accessible game objects in the gmae arena. - EXAMPLE STR OUTPUT + 获取竞技场(arena)中所有可访问游戏对象的字符串列表。如果 + 指定了 temp_address,我们返回该竞技场中可用的对象; + 如果没有指定,则返回我们角色当前所在竞技场中的对象。 + + 输入 + arena: 竞技场地址 (例如 "world:sector:arena") + 输出 + 游戏竞技场中所有可访问游戏对象的字符串列表。 + 输出字符串示例 "phone, charger, bed, nightstand" """ curr_world, curr_sector, curr_arena = arena.split(":") diff --git a/reverie/backend_server/persona/persona.py b/reverie/backend_server/persona/persona.py index c4a3966fdd..ae674613ef 100644 --- a/reverie/backend_server/persona/persona.py +++ b/reverie/backend_server/persona/persona.py @@ -1,12 +1,11 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: persona.py -Description: Defines the Persona class that powers the agents in Reverie. +文件: persona.py +描述: 定义了为 Reverie 中的代理提供支持的 Persona 类。 -Note (May 1, 2023) -- this is effectively GenerativeAgent class. Persona was -the term we used internally back in 2022, taking from our Social Simulacra -paper. +注意 (2023年5月1日) -- 这实际上就是 GenerativeAgent 类。Persona 是 +我们在2022年内部使用的术语,源于我们的 Social Simulacra 论文。 """ import math import sys @@ -29,35 +28,33 @@ class Persona: def __init__(self, name, folder_mem_saved=False): - # PERSONA BASE STATE - # is the full name of the persona. This is a unique identifier for - # the persona within Reverie. + # 角色基本状态 + # 是角色的全名。这是 Reverie 中角色的唯一标识符。 self.name = name - # PERSONA MEMORY - # If there is already memory in folder_mem_saved, we load that. Otherwise, - # we create new memory instances. - # is the persona's spatial memory. + # 角色记忆 + # 如果 folder_mem_saved 中已有记忆,则加载它。否则,我们创建新的记忆实例。 + # 是角色的空间记忆。 f_s_mem_saved = f"{folder_mem_saved}/bootstrap_memory/spatial_memory.json" self.s_mem = MemoryTree(f_s_mem_saved) - # is the persona's associative memory. + # 是角色的联想记忆。 f_a_mem_saved = f"{folder_mem_saved}/bootstrap_memory/associative_memory" self.a_mem = AssociativeMemory(f_a_mem_saved) - # is the persona's scratch (short term memory) space. + # 是角色的暂存(短期记忆)空间。 scratch_saved = f"{folder_mem_saved}/bootstrap_memory/scratch.json" self.scratch = Scratch(scratch_saved) def save(self, save_folder): """ - Save persona's current state (i.e., memory). + 保存角色的当前状态(即记忆)。 - INPUT: - save_folder: The folder where we wil be saving our persona's state. - OUTPUT: - None + 输入: + save_folder: 我们将保存角色状态的文件夹。 + 输出: + 无 """ - # Spatial memory contains a tree in a json format. + # 空间记忆包含一个 JSON 格式的树。 # e.g., {"double studio": # {"double studio": # {"bedroom 2": @@ -65,105 +62,93 @@ def save(self, save_folder): f_s_mem = f"{save_folder}/spatial_memory.json" self.s_mem.save(f_s_mem) - # Associative memory contains a csv with the following rows: + # 联想记忆包含一个 CSV 文件,包含以下行: # [event.type, event.created, event.expiration, s, p, o] # e.g., event,2022-10-23 00:00:00,,Isabella Rodriguez,is,idle f_a_mem = f"{save_folder}/associative_memory" self.a_mem.save(f_a_mem) - # Scratch contains non-permanent data associated with the persona. When - # it is saved, it takes a json form. When we load it, we move the values - # to Python variables. + # 暂存空间包含与角色相关的非永久性数据。当它被保存时,它采用 JSON 格式。当我们加载它时,我们将值移动到 Python 变量中。 f_scratch = f"{save_folder}/scratch.json" self.scratch.save(f_scratch) def perceive(self, maze): """ - This function takes the current maze, and returns events that are - happening around the persona. Importantly, perceive is guided by - two key hyper-parameter for the persona: 1) att_bandwidth, and - 2) retention. - - First, determines the number of nearby events that the - persona can perceive. Say there are 10 events that are within the vision - radius for the persona -- perceiving all 10 might be too much. So, the - persona perceives the closest att_bandwidth number of events in case there - are too many events. - - Second, the persona does not want to perceive and think about the same - event at each time step. That's where comes in -- there is - temporal order to what the persona remembers. So if the persona's memory - contains the current surrounding events that happened within the most - recent retention, there is no need to perceive that again. xx - - INPUT: - maze: Current instance of the world. - OUTPUT: - a list of that are perceived and new. - See associative_memory.py -- but to get you a sense of what it - receives as its input: "s, p, o, desc, persona.scratch.curr_time" + 此函数接收当前迷宫,并返回角色周围发生的事件。重要的是,感知由角色的两个关键超参数引导: + 1) att_bandwidth (注意力带宽),以及 2) retention (记忆保留度)。 + + 首先, 决定了角色可以感知的附近事件的数量。 + 假设在角色的视觉半径内有10个事件——感知所有10个可能太多了。 + 因此,如果事件过多,角色会感知最近的 att_bandwidth 个事件。 + + 其次,角色不希望在每个时间步都感知和思考相同的事件。 + 这就是 发挥作用的地方——角色记忆的内容有时间顺序。 + 因此,如果角色的记忆中包含了在最近的 retention 时间内发生的当前周围事件, + 则无需再次感知。xx + + 输入: + maze: 世界的当前 实例。 + 输出: + 一个 (概念节点) 列表,包含感知到的新事件。 + 参见 associative_memory.py —— 但为了让你了解它接收的输入内容: "s, p, o, desc, persona.scratch.curr_time" """ return perceive(self, maze) def retrieve(self, perceived): """ - This function takes the events that are perceived by the persona as input - and returns a set of related events and thoughts that the persona would - need to consider as context when planning. - - INPUT: - perceive: a list of that are perceived and new. - OUTPUT: - retrieved: dictionary of dictionary. The first layer specifies an event, - while the latter layer specifies the "curr_event", "events", - and "thoughts" that are relevant. + 此函数将角色感知到的事件作为输入, + 并返回一组相关的事件和想法,角色在规划时需要将这些作为上下文来考虑。 + + 输入: + perceive: 一个 (概念节点) 列表,包含感知到的新事件。 + 输出: + retrieved: 字典的字典。第一层指定一个事件, + 而后者层指定相关的 "curr_event" (当前事件), "events" (事件), + 和 "thoughts" (想法)。 """ return retrieve(self, perceived) def plan(self, maze, personas, new_day, retrieved): """ - Main cognitive function of the chain. It takes the retrieved memory and - perception, as well as the maze and the first day state to conduct both - the long term and short term planning for the persona. - - INPUT: - maze: Current instance of the world. - personas: A dictionary that contains all persona names as keys, and the - Persona instance as values. - new_day: This can take one of the three values. - 1) False -- It is not a "new day" cycle (if it is, we would - need to call the long term planning sequence for the persona). - 2) "First day" -- It is literally the start of a simulation, - so not only is it a new day, but also it is the first day. - 2) "New day" -- It is a new day. - retrieved: dictionary of dictionary. The first layer specifies an event, - while the latter layer specifies the "curr_event", "events", - and "thoughts" that are relevant. - OUTPUT - The target action address of the persona (persona.scratch.act_address). + 认知链的主要功能。它接收检索到的记忆和感知, + 以及迷宫和第一天的状态,以便为角色进行长期和短期规划。 + + 输入: + maze: 世界的当前 实例。 + personas: 一个字典,其中包含所有角色名称作为键,Persona 实例作为值。 + new_day: 可以是以下三个值之一。 + 1) <布尔值> False -- 不是 "新的一天" 周期(如果是,我们需要 + 为角色调用长期规划序列)。 + 2) <字符串> "First day" -- 这确实是模拟的开始, + 所以它不仅是新的一天,也是第一天。 + 3) <字符串> "New day" -- 这是新的一天。 + retrieved: 字典的字典。第一层指定一个事件, + 而后者层指定相关的 "curr_event" (当前事件), "events" (事件), + 和 "thoughts" (想法)。 + 输出 + 角色的目标动作地址 (persona.scratch.act_address)。 """ return plan(self, maze, personas, new_day, retrieved) def execute(self, maze, personas, plan): """ - This function takes the agent's current plan and outputs a concrete - execution (what object to use, and what tile to travel to). - - INPUT: - maze: Current instance of the world. - personas: A dictionary that contains all persona names as keys, and the - Persona instance as values. - plan: The target action address of the persona - (persona.scratch.act_address). - OUTPUT: - execution: A triple set that contains the following components: - is a x,y coordinate. e.g., (58, 9) - is an emoji. - is a string description of the movement. e.g., + 此函数接收代理的当前计划并输出一个具体的执行方案 + (使用什么对象,以及移动到哪个瓦片)。 + + 输入: + maze: 世界的当前 实例。 + personas: 一个字典,其中包含所有角色名称作为键,Persona 实例作为值。 + plan: 角色的目标动作地址 + (persona.scratch.act_address)。 + 输出: + execution: 一个包含以下组件的三元组: + 是一个 x,y 坐标。例如:(58, 9) + 是一个表情符号。 + 是动作的字符串描述。例如: writing her next novel (editing her novel) @ double studio:double studio:common room:sofa """ @@ -172,60 +157,55 @@ def execute(self, maze, personas, plan): def reflect(self): """ - Reviews the persona's memory and create new thoughts based on it. + 回顾角色的记忆并基于此产生新的想法。 - INPUT: - None - OUTPUT: - None + 输入: + 无 + 输出: + 无 """ reflect(self) def move(self, maze, personas, curr_tile, curr_time): """ - This is the main cognitive function where our main sequence is called. - - INPUT: - maze: The Maze class of the current world. - personas: A dictionary that contains all persona names as keys, and the - Persona instance as values. - curr_tile: A tuple that designates the persona's current tile location - in (row, col) form. e.g., (58, 39) - curr_time: datetime instance that indicates the game's current time. - OUTPUT: - execution: A triple set that contains the following components: - is a x,y coordinate. e.g., (58, 9) - is an emoji. - is a string description of the movement. e.g., + 这是调用我们主序列的主要认知功能。 + + 输入: + maze: 当前世界的 Maze 类。 + personas: 一个字典,其中包含所有角色名称作为键,Persona 实例作为值。 + curr_tile: 一个元组,以 (行, 列) 形式指定角色的当前瓦片位置。例如:(58, 39) + curr_time: 表示游戏当前时间的 datetime 实例。 + 输出: + execution: 一个包含以下组件的三元组: + 是一个 x,y 坐标。例如:(58, 9) + 是一个表情符号。 + 是动作的字符串描述。例如: writing her next novel (editing her novel) @ double studio:double studio:common room:sofa """ - # Updating persona's scratch memory with . + # 用 更新角色的暂存记忆。 self.scratch.curr_tile = curr_tile - # We figure out whether the persona started a new day, and if it is a new - # day, whether it is the very first day of the simulation. This is - # important because we set up the persona's long term plan at the start of - # a new day. + # 我们判断角色是否开始了新的一天,如果是新的一天,是否是模拟的第一天。这很重要,因为我们在新的一天开始时为角色设定长期计划。 new_day = False if not self.scratch.curr_time: - new_day = "First day" + new_day = "First day" # Do not translate elif (self.scratch.curr_time.strftime('%A %B %d') != curr_time.strftime('%A %B %d')): - new_day = "New day" + new_day = "New day" # Do not translate self.scratch.curr_time = curr_time - # Main cognitive sequence begins here. + # 主要认知序列从这里开始。 perceived = self.perceive(maze) retrieved = self.retrieve(perceived) plan = self.plan(maze, personas, new_day, retrieved) self.reflect() - # is a triple set that contains the following components: - # is a x,y coordinate. e.g., (58, 9) - # is an emoji. e.g., "\ud83d\udca4" - # is a string description of the movement. e.g., + # 是一个包含以下组件的三元组: + # 是一个 x,y 坐标。例如:(58, 9) + # 是一个表情符号。例如:"\ud83d\udca4" + # 是动作的字符串描述。例如: # writing her next novel (editing her novel) # @ double studio:double studio:common room:sofa return self.execute(maze, personas, plan) diff --git a/reverie/backend_server/reverie.py b/reverie/backend_server/reverie.py index 2d753d1029..527db99a3a 100644 --- a/reverie/backend_server/reverie.py +++ b/reverie/backend_server/reverie.py @@ -27,6 +27,7 @@ import os import shutil import traceback +import logging from selenium import webdriver @@ -35,6 +36,20 @@ from maze import * from persona.persona import * +# ================================================================================================== +# CONFIGURE LOGGER +# ================================================================================================== +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) # Default logging level +# Create a stream handler to output to console +stream_handler = logging.StreamHandler() +# Define log format +formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") +stream_handler.setFormatter(formatter) +# Add the handler to the logger +if not logger.handlers: + logger.addHandler(stream_handler) + ############################################################################## # REVERIE # ############################################################################## @@ -48,22 +63,41 @@ def __init__(self, # Interestingly, all simulations must be forked from some initial # simulation, where the first simulation is "hand-crafted". self.fork_sim_code = fork_sim_code + logger.info(f"Forking simulation from: {self.fork_sim_code}") fork_folder = f"{fs_storage}/{self.fork_sim_code}" # indicates our current simulation. The first step here is to # copy everything that's in , but edit its # reverie/meta/json's fork variable. self.sim_code = sim_code + logger.info(f"Initializing new simulation: {self.sim_code}") sim_folder = f"{fs_storage}/{self.sim_code}" - copyanything(fork_folder, sim_folder) - - with open(f"{sim_folder}/reverie/meta.json") as json_file: - reverie_meta = json.load(json_file) - - with open(f"{sim_folder}/reverie/meta.json", "w") as outfile: - reverie_meta["fork_sim_code"] = fork_sim_code - outfile.write(json.dumps(reverie_meta, indent=2)) - + try: + copyanything(fork_folder, sim_folder) + logger.info(f"Copied data from {fork_folder} to {sim_folder}") + except Exception as e: + logger.error(f"Error copying data from {fork_folder} to {sim_folder}: {e}", exc_info=True) + # Depending on the desired behavior, we might want to raise the exception or exit + raise # Reraising the exception as this is a critical step + + try: + with open(f"{sim_folder}/reverie/meta.json") as json_file: + reverie_meta = json.load(json_file) + with open(f"{sim_folder}/reverie/meta.json", "w") as outfile: + reverie_meta["fork_sim_code"] = fork_sim_code + outfile.write(json.dumps(reverie_meta, indent=2)) + logger.info(f"Updated meta.json in {sim_folder} with fork_sim_code: {fork_sim_code}") + except FileNotFoundError: + logger.error(f"meta.json not found in {sim_folder}. This should have been copied from the fork.", exc_info=True) + raise + except json.JSONDecodeError: + logger.error(f"Error decoding meta.json in {sim_folder}.", exc_info=True) + raise + except IOError as e: + logger.error(f"IOError when accessing meta.json in {sim_folder}: {e}", exc_info=True) + raise + + logger.info("Loading Reverie's global variables...") # LOADING REVERIE'S GLOBAL VARIABLES # The start datetime of the Reverie: # is the datetime instance for the start datetime of @@ -71,28 +105,37 @@ def __init__(self, # change. It takes a string date in the following example form: # "June 25, 2022" # e.g., ...strptime(June 25, 2022, "%B %d, %Y") - self.start_time = datetime.datetime.strptime( - f"{reverie_meta['start_date']}, 00:00:00", - "%B %d, %Y, %H:%M:%S") - # is the datetime instance that indicates the game's current - # time. This gets incremented by amount everytime the world - # progresses (that is, everytime curr_env_file is recieved). - self.curr_time = datetime.datetime.strptime(reverie_meta['curr_time'], - "%B %d, %Y, %H:%M:%S") - # denotes the number of seconds in game time that each - # step moves foward. - self.sec_per_step = reverie_meta['sec_per_step'] - - # is the main Maze instance. Note that we pass in the maze_name - # (e.g., "double_studio") to instantiate Maze. - # e.g., Maze("double_studio") - self.maze = Maze(reverie_meta['maze_name']) - - # denotes the number of steps that our game has taken. A step here - # literally translates to the number of moves our personas made in terms - # of the number of tiles. - self.step = reverie_meta['step'] - + try: + self.start_time = datetime.datetime.strptime( + f"{reverie_meta['start_date']}, 00:00:00", + "%B %d, %Y, %H:%M:%S") + # is the datetime instance that indicates the game's current + # time. This gets incremented by amount everytime the world + # progresses (that is, everytime curr_env_file is recieved). + self.curr_time = datetime.datetime.strptime(reverie_meta['curr_time'], + "%B %d, %Y, %H:%M:%S") + # denotes the number of seconds in game time that each + # step moves foward. + self.sec_per_step = reverie_meta['sec_per_step'] + + # is the main Maze instance. Note that we pass in the maze_name + # (e.g., "double_studio") to instantiate Maze. + # e.g., Maze("double_studio") + self.maze = Maze(reverie_meta['maze_name']) + + # denotes the number of steps that our game has taken. A step here + # literally translates to the number of moves our personas made in terms + # of the number of tiles. + self.step = reverie_meta['step'] + logger.info(f"Reverie meta loaded: Start time: {self.start_time}, Current time: {self.curr_time}, Step: {self.step}") + except KeyError as e: + logger.error(f"KeyError accessing Reverie meta: {e}. Check meta.json structure.", exc_info=True) + raise + except ValueError as e: + logger.error(f"ValueError during datetime parsing from Reverie meta: {e}.", exc_info=True) + raise + + logger.info("Setting up personas...") # SETTING UP PERSONAS IN REVERIE # is a dictionary that takes the persona's full name as its # keys, and the actual persona instance as its values. @@ -119,23 +162,42 @@ def __init__(self, # self.persona_convo = dict() # Loading in all personas. - init_env_file = f"{sim_folder}/environment/{str(self.step)}.json" - init_env = json.load(open(init_env_file)) - for persona_name in reverie_meta['persona_names']: - persona_folder = f"{sim_folder}/personas/{persona_name}" - p_x = init_env[persona_name]["x"] - p_y = init_env[persona_name]["y"] - curr_persona = Persona(persona_name, persona_folder) - - self.personas[persona_name] = curr_persona - self.personas_tile[persona_name] = (p_x, p_y) - self.maze.tiles[p_y][p_x]["events"].add(curr_persona.scratch - .get_curr_event_and_desc()) + try: + init_env_file = f"{sim_folder}/environment/{str(self.step)}.json" + with open(init_env_file) as f: + init_env = json.load(f) + logger.info(f"Loading initial environment from: {init_env_file}") + + for persona_name in reverie_meta['persona_names']: + logger.debug(f"Loading persona: {persona_name}") + persona_folder = f"{sim_folder}/personas/{persona_name}" + p_x = init_env[persona_name]["x"] + p_y = init_env[persona_name]["y"] + curr_persona = Persona(persona_name, persona_folder) + + self.personas[persona_name] = curr_persona + self.personas_tile[persona_name] = (p_x, p_y) + self.maze.tiles[p_y][p_x]["events"].add(curr_persona.scratch + .get_curr_event_and_desc()) + logger.info(f"Loaded persona {persona_name} at tile ({p_x}, {p_y})") + except FileNotFoundError: + logger.error(f"Initial environment file {init_env_file} not found.", exc_info=True) + raise + except json.JSONDecodeError: + logger.error(f"Error decoding initial environment file {init_env_file}.", exc_info=True) + raise + except KeyError as e: + logger.error(f"KeyError when loading persona data from init_env or reverie_meta: {e}", exc_info=True) + raise + except Exception as e: + logger.error(f"An unexpected error occurred during persona setup: {e}", exc_info=True) + raise # REVERIE SETTINGS PARAMETERS: # denotes the amount of time that our while loop rests each # cycle; this is to not kill our machine. self.server_sleep = 0.1 + logger.info(f"Server sleep time set to: {self.server_sleep}s") # SIGNALING THE FRONTEND SERVER: # curr_sim_code.json contains the current simulation code, and @@ -143,15 +205,22 @@ def __init__(self, # used to communicate the code and step information to the frontend. # Note that step file is removed as soon as the frontend opens up the # simulation. - curr_sim_code = dict() - curr_sim_code["sim_code"] = self.sim_code - with open(f"{fs_temp_storage}/curr_sim_code.json", "w") as outfile: - outfile.write(json.dumps(curr_sim_code, indent=2)) - - curr_step = dict() - curr_step["step"] = self.step - with open(f"{fs_temp_storage}/curr_step.json", "w") as outfile: - outfile.write(json.dumps(curr_step, indent=2)) + logger.info("Signaling frontend server with simulation code and step...") + try: + curr_sim_code = dict() + curr_sim_code["sim_code"] = self.sim_code + with open(f"{fs_temp_storage}/curr_sim_code.json", "w") as outfile: + outfile.write(json.dumps(curr_sim_code, indent=2)) + + curr_step = dict() + curr_step["step"] = self.step + with open(f"{fs_temp_storage}/curr_step.json", "w") as outfile: + outfile.write(json.dumps(curr_step, indent=2)) + logger.info(f"Frontend signal files created in {fs_temp_storage}") + except IOError as e: + logger.error(f"IOError writing frontend signal files: {e}", exc_info=True) + except Exception as e: + logger.error(f"Unexpected error writing frontend signal files: {e}", exc_info=True) def save(self): @@ -165,26 +234,39 @@ def save(self): None * Saves all relevant data to the designated memory directory """ + logger.info(f"Saving simulation state for sim_code: {self.sim_code}") # points to the current simulation folder. sim_folder = f"{fs_storage}/{self.sim_code}" - # Save Reverie meta information. - reverie_meta = dict() - reverie_meta["fork_sim_code"] = self.fork_sim_code - reverie_meta["start_date"] = self.start_time.strftime("%B %d, %Y") - reverie_meta["curr_time"] = self.curr_time.strftime("%B %d, %Y, %H:%M:%S") - reverie_meta["sec_per_step"] = self.sec_per_step - reverie_meta["maze_name"] = self.maze.maze_name - reverie_meta["persona_names"] = list(self.personas.keys()) - reverie_meta["step"] = self.step - reverie_meta_f = f"{sim_folder}/reverie/meta.json" - with open(reverie_meta_f, "w") as outfile: - outfile.write(json.dumps(reverie_meta, indent=2)) - - # Save the personas. - for persona_name, persona in self.personas.items(): - save_folder = f"{sim_folder}/personas/{persona_name}/bootstrap_memory" - persona.save(save_folder) + try: + # Save Reverie meta information. + reverie_meta = dict() + reverie_meta["fork_sim_code"] = self.fork_sim_code + reverie_meta["start_date"] = self.start_time.strftime("%B %d, %Y") + reverie_meta["curr_time"] = self.curr_time.strftime("%B %d, %Y, %H:%M:%S") + reverie_meta["sec_per_step"] = self.sec_per_step + reverie_meta["maze_name"] = self.maze.maze_name + reverie_meta["persona_names"] = list(self.personas.keys()) + reverie_meta["step"] = self.step + reverie_meta_f = f"{sim_folder}/reverie/meta.json" + with open(reverie_meta_f, "w") as outfile: + outfile.write(json.dumps(reverie_meta, indent=2)) + logger.info(f"Saved Reverie meta to {reverie_meta_f}") + except IOError as e: + logger.error(f"IOError saving Reverie meta: {e}", exc_info=True) + except Exception as e: + logger.error(f"Unexpected error saving Reverie meta: {e}", exc_info=True) + + try: + # Save the personas. + for persona_name, persona in self.personas.items(): + logger.debug(f"Saving persona: {persona_name}") + save_folder = f"{sim_folder}/personas/{persona_name}/bootstrap_memory" + persona.save(save_folder) + logger.info(f"Saved all personas to {sim_folder}/personas") + except Exception as e: # Persona save method might raise various exceptions + logger.error(f"Error saving one or more personas: {e}", exc_info=True) + logger.info("Save process completed.") def start_path_tester_server(self): @@ -202,7 +284,9 @@ def start_path_tester_server(self): * Saves the spatial memory of the test agent to the path_tester_env.json of the temp storage. """ + logger.info("Starting path tester server...") def print_tree(tree): + # This function is for direct output, so keeping print statements. def _print_tree(tree, depth): dash = " >" * depth @@ -230,10 +314,22 @@ def _print_tree(tree, depth): curr_dict = {} tester_file = fs_temp_storage + "/path_tester_env.json" if check_if_file_exists(tester_file): - with open(tester_file) as json_file: - curr_dict = json.load(json_file) + logger.debug(f"Path tester env file found: {tester_file}") + try: + with open(tester_file) as json_file: + curr_dict = json.load(json_file) os.remove(tester_file) - + logger.debug(f"Processed and removed {tester_file}") + except json.JSONDecodeError: + logger.error(f"Error decoding JSON from {tester_file}", exc_info=True) + continue # Skip this iteration if file is corrupted + except IOError as e: + logger.error(f"IOError with {tester_file}: {e}", exc_info=True) + continue # Skip this iteration + except Exception as e: + logger.error(f"Unexpected error processing {tester_file}: {e}", exc_info=True) + continue + # Current camera location curr_sts = self.maze.sq_tile_size curr_camera = (int(math.ceil(curr_dict["x"]/curr_sts)), @@ -264,13 +360,23 @@ def _print_tree(tree, depth): i_det["game_object"]] # Incrementally outputting the s_mem and saving the json file. - print ("= " * 15) + # This print is for console feedback during path testing. + print ("= " * 15) out_file = fs_temp_storage + "/path_tester_out.json" - with open(out_file, "w") as outfile: - outfile.write(json.dumps(s_mem, indent=2)) - print_tree(s_mem) - - except: + try: + with open(out_file, "w") as outfile: + outfile.write(json.dumps(s_mem, indent=2)) + # This print_tree is for direct console output. + print_tree(s_mem) + except IOError as e: + logger.error(f"IOError writing path_tester_out.json: {e}", exc_info=True) + except Exception as e: + logger.error(f"Unexpected error writing path_tester_out.json: {e}", exc_info=True) + + + except Exception as e: # Catching broader exceptions in the while True loop + logger.error(f"An error occurred in path tester server loop: {e}", exc_info=True) + # Pass to allow the loop to continue, or add specific error handling logic pass time.sleep(self.server_sleep * 10) @@ -289,6 +395,7 @@ def start_server(self, int_counter): OUTPUT None """ + logger.info(f"Starting Reverie server for {int_counter} steps.") # points to the current simulation folder. sim_folder = f"{fs_storage}/{self.sim_code}" @@ -306,14 +413,17 @@ def start_server(self, int_counter): while (True): # Done with this iteration if reaches 0. if int_counter == 0: + logger.info("Reached target step count. Server loop ending.") break - + + env_retrieved = False # ensure env_retrieved is defined # file is the file that our frontend outputs. When the # frontend has done its job and moved the personas, then it will put a # new environment file that matches our step count. That's when we run # the content of this for loop. Otherwise, we just wait. curr_env_file = f"{sim_folder}/environment/{self.step}.json" if check_if_file_exists(curr_env_file): + logger.debug(f"Environment file found: {curr_env_file} for step {self.step}") # If we have an environment file, it means we have a new perception # input to our personas. So we first retrieve it. try: @@ -321,96 +431,378 @@ def start_server(self, int_counter): with open(curr_env_file) as json_file: new_env = json.load(json_file) env_retrieved = True - except: + logger.debug(f"Successfully loaded environment from {curr_env_file}") + except FileNotFoundError: # Should be caught by check_if_file_exists, but good for robustness + logger.error(f"Error: Environment file not found at {curr_env_file} (should have been checked)", exc_info=True) + pass # Or time.sleep(x) then continue + except json.JSONDecodeError: + logger.error(f"Error: Could not decode JSON from {curr_env_file}", exc_info=True) + pass + except IOError as e: + logger.error(f"Error: Could not read file at {curr_env_file}. IO Error: {e}", exc_info=True) + pass + except Exception as e: + logger.error(f"An unexpected error occurred while retrieving environment file: {e}", exc_info=True) pass if env_retrieved: + logger.debug(f"Processing step {self.step}") # This is where we go through to clean up all # object actions that were used in this cylce. - for key, val in game_obj_cleanup.items(): - # We turn all object actions to their blank form (with None). - self.maze.turn_event_from_tile_idle(key, val) - # Then we initialize game_obj_cleanup for this cycle. - game_obj_cleanup = dict() - - # We first move our personas in the backend environment to match - # the frontend environment. - for persona_name, persona in self.personas.items(): - # is the tile that the persona was at previously. - curr_tile = self.personas_tile[persona_name] - # is the tile that the persona will move to right now, - # during this cycle. - new_tile = (new_env[persona_name]["x"], - new_env[persona_name]["y"]) - - # We actually move the persona on the backend tile map here. - self.personas_tile[persona_name] = new_tile - self.maze.remove_subject_events_from_tile(persona.name, curr_tile) - self.maze.add_event_from_tile(persona.scratch - .get_curr_event_and_desc(), new_tile) - - # Now, the persona will travel to get to their destination. *Once* - # the persona gets there, we activate the object action. - if not persona.scratch.planned_path: - # We add that new object action event to the backend tile map. - # At its creation, it is stored in the persona's backend. - game_obj_cleanup[persona.scratch - .get_curr_obj_event_and_desc()] = new_tile + try: + for key, val in game_obj_cleanup.items(): + # We turn all object actions to their blank form (with None). + self.maze.turn_event_from_tile_idle(key, val) + # Then we initialize game_obj_cleanup for this cycle. + game_obj_cleanup = dict() + + # We first move our personas in the backend environment to match + # the frontend environment. + for persona_name, persona in self.personas.items(): + # is the tile that the persona was at previously. + curr_tile = self.personas_tile[persona_name] + # is the tile that the persona will move to right now, + # during this cycle. + new_tile = (new_env[persona_name]["x"], + new_env[persona_name]["y"]) + + # We actually move the persona on the backend tile map here. + self.personas_tile[persona_name] = new_tile + self.maze.remove_subject_events_from_tile(persona.name, curr_tile) self.maze.add_event_from_tile(persona.scratch - .get_curr_obj_event_and_desc(), new_tile) - # We also need to remove the temporary blank action for the - # object that is currently taking the action. - blank = (persona.scratch.get_curr_obj_event_and_desc()[0], - None, None, None) - self.maze.remove_event_from_tile(blank, new_tile) - - # Then we need to actually have each of the personas perceive and - # move. The movement for each of the personas comes in the form of - # x y coordinates where the persona will move towards. e.g., (50, 34) - # This is where the core brains of the personas are invoked. - movements = {"persona": dict(), - "meta": dict()} - for persona_name, persona in self.personas.items(): - # is a x,y coordinate. e.g., (58, 9) - # is an emoji. e.g., "\ud83d\udca4" - # is a string description of the movement. e.g., - # writing her next novel (editing her novel) - # @ double studio:double studio:common room:sofa - next_tile, pronunciatio, description = persona.move( - self.maze, self.personas, self.personas_tile[persona_name], - self.curr_time) - movements["persona"][persona_name] = {} - movements["persona"][persona_name]["movement"] = next_tile - movements["persona"][persona_name]["pronunciatio"] = pronunciatio - movements["persona"][persona_name]["description"] = description - movements["persona"][persona_name]["chat"] = (persona - .scratch.chat) - - # Include the meta information about the current stage in the - # movements dictionary. - movements["meta"]["curr_time"] = (self.curr_time - .strftime("%B %d, %Y, %H:%M:%S")) - - # We then write the personas' movements to a file that will be sent - # to the frontend server. - # Example json output: - # {"persona": {"Maria Lopez": {"movement": [58, 9]}}, - # "persona": {"Klaus Mueller": {"movement": [38, 12]}}, - # "meta": {curr_time: }} - curr_move_file = f"{sim_folder}/movement/{self.step}.json" - with open(curr_move_file, "w") as outfile: - outfile.write(json.dumps(movements, indent=2)) - - # After this cycle, the world takes one step forward, and the - # current time moves by amount. - self.step += 1 - self.curr_time += datetime.timedelta(seconds=self.sec_per_step) - - int_counter -= 1 - + .get_curr_event_and_desc(), new_tile) + logger.debug(f"Moved {persona_name} from {curr_tile} to {new_tile}") + + # Now, the persona will travel to get to their destination. *Once* + # the persona gets there, we activate the object action. + if not persona.scratch.planned_path: + logger.debug(f"{persona_name} reached destination, activating object action.") + # We add that new object action event to the backend tile map. + # At its creation, it is stored in the persona's backend. + game_obj_cleanup[persona.scratch + .get_curr_obj_event_and_desc()] = new_tile + self.maze.add_event_from_tile(persona.scratch + .get_curr_obj_event_and_desc(), new_tile) + # We also need to remove the temporary blank action for the + # object that is currently taking the action. + blank = (persona.scratch.get_curr_obj_event_and_desc()[0], + None, None, None) + self.maze.remove_event_from_tile(blank, new_tile) + + # Then we need to actually have each of the personas perceive and + # move. The movement for each of the personas comes in the form of + # x y coordinates where the persona will move towards. e.g., (50, 34) + # This is where the core brains of the personas are invoked. + movements = {"persona": dict(), + "meta": dict()} + for persona_name, persona in self.personas.items(): + logger.debug(f"Requesting move for {persona_name} at {self.personas_tile[persona_name]}") + # is a x,y coordinate. e.g., (58, 9) + # is an emoji. e.g., "\ud83d\udca4" + # is a string description of the movement. e.g., + # writing her next novel (editing her novel) + # @ double studio:double studio:common room:sofa + next_tile, pronunciatio, description = persona.move( + self.maze, self.personas, self.personas_tile[persona_name], + self.curr_time) + movements["persona"][persona_name] = {} + movements["persona"][persona_name]["movement"] = next_tile + movements["persona"][persona_name]["pronunciatio"] = pronunciatio + movements["persona"][persona_name]["description"] = description + movements["persona"][persona_name]["chat"] = (persona + .scratch.chat) + logger.debug(f"{persona_name} move generated: to {next_tile}, emoji {pronunciatio}, desc: {description}") + + # Include the meta information about the current stage in the + # movements dictionary. + movements["meta"]["curr_time"] = (self.curr_time + .strftime("%B %d, %Y, %H:%M:%S")) + + # We then write the personas' movements to a file that will be sent + # to the frontend server. + # Example json output: + # {"persona": {"Maria Lopez": {"movement": [58, 9]}}, + # "persona": {"Klaus Mueller": {"movement": [38, 12]}}, + # "meta": {curr_time: }} + curr_move_file = f"{sim_folder}/movement/{self.step}.json" + try: + with open(curr_move_file, "w") as outfile: + outfile.write(json.dumps(movements, indent=2)) + logger.info(f"Movements for step {self.step} written to {curr_move_file}") + except IOError as e: + logger.error(f"IOError writing movements file {curr_move_file}: {e}", exc_info=True) + except Exception as e: + logger.error(f"Unexpected error writing movements file {curr_move_file}: {e}", exc_info=True) + + + # After this cycle, the world takes one step forward, and the + # current time moves by amount. + self.step += 1 + self.curr_time += datetime.timedelta(seconds=self.sec_per_step) + logger.debug(f"Advanced to step {self.step}, current time: {self.curr_time.strftime('%B %d, %Y, %H:%M:%S')}") + + int_counter -= 1 + except KeyError as e: # Catching potential KeyErrors from new_env accesses or persona dicts + logger.error(f"KeyError during step processing: {e}. Likely an issue with environment data or persona state.", exc_info=True) + # Depending on severity, might need to break or implement more robust recovery + except Exception as e: # Catch-all for other unexpected errors during the core step processing + logger.error(f"An unexpected error occurred during server step {self.step} processing: {e}", exc_info=True) + # This might indicate a more serious issue, consider if loop should continue + # Sleep so we don't burn our machines. time.sleep(self.server_sleep) + # COMMAND HANDLER METHODS START HERE + def _handle_lifecycle_commands(self, sim_command, sim_folder): + """Handles simulation lifecycle commands: fin, exit, save, run.""" + ret_str = "" + should_break = False + + if sim_command.lower() in ["f", "fin", "finish", "save and finish"]: + logger.info("Command: Finish and Save. Saving simulation state...") + self.save() + logger.info("Simulation saved. Exiting.") # Internal log + should_break = True + + elif sim_command.lower() == "exit": + logger.info("Command: Exit without saving.") # Internal log + try: + logger.info(f"Removing simulation folder: {sim_folder}") # Internal log + shutil.rmtree(sim_folder) + logger.info(f"Successfully removed {sim_folder}") # Internal log + except OSError as e: + logger.error(f"Error: Could not remove simulation folder {sim_folder}. OS Error: {e}", exc_info=True) # Internal log + logger.info("Exiting.") # Internal log + should_break = True + + elif sim_command.lower() == "save": + logger.info("Command: Save.") # Internal log + self.save() + ret_str = "模拟进度已保存。" + + elif sim_command.startswith("run"): # Using startswith for "run" + logger.info(f"Command: Run. Input: {sim_command}") # Internal log + try: + parts = sim_command.split() + if len(parts) > 1: + int_count = int(parts[-1]) + logger.info(f"Starting server for {int_count} steps.") # Internal log + self.start_server(int_count) + ret_str = f"已完成运行 {int_count} 个步骤。" + else: + logger.error(f"Error: Number of steps not specified. Command: '{sim_command}'") # Internal log + ret_str = f"错误:未指定步骤数。" + except ValueError: + logger.error(f"Error: Invalid number of steps. Please provide an integer. Command: '{sim_command}'", exc_info=True) # Internal log + ret_str = f"错误:步骤数无效。请输入一个整数。" + except IndexError: # Should be caught by len(parts) check, but as a safeguard + logger.error(f"Error: Number of steps not specified or command malformed. Command: '{sim_command}'", exc_info=True) # Internal log + ret_str = f"错误:未指定步骤数或命令格式错误。" + + return ret_str, should_break + + def _handle_persona_info_commands(self, sim_command): + """Handles commands for printing persona information.""" + ret_str = "" + persona_name_parts = sim_command.split()[-2:] + persona_name_str = " ".join(persona_name_parts) + + if "print persona schedule" in sim_command and "hourly org" not in sim_command and "all" not in sim_command : + logger.info(f"Command: Print Persona Schedule for {persona_name_str}") # Internal log + if persona_name_str in self.personas: + ret_str += (self.personas[persona_name_str] + .scratch.get_str_daily_schedule_summary()) + else: + logger.warning(f"Persona '{persona_name_str}' not found for 'print persona schedule'.") # User-facing via ret_str + ret_str = f"错误:未找到角色 '{persona_name_str}'。" + + elif "print all persona schedule" in sim_command: + logger.info("Command: Print All Persona Schedules") # Internal log + for p_name, persona in self.personas.items(): + ret_str += f"{p_name}\n" + ret_str += f"{persona.scratch.get_str_daily_schedule_summary()}\n" + ret_str += f"---\n" + + elif "print hourly org persona schedule" in sim_command: + logger.info(f"Command: Print Hourly Org Persona Schedule for {persona_name_str}") # Internal log + if persona_name_str in self.personas: + ret_str += (self.personas[persona_name_str] + .scratch.get_str_daily_schedule_hourly_org_summary()) + else: + logger.warning(f"Persona '{persona_name_str}' not found for 'print hourly org persona schedule'.") # User-facing + ret_str = f"错误:未找到角色 '{persona_name_str}'。" + + elif "print persona current tile" in sim_command: + logger.info(f"Command: Print Persona Current Tile for {persona_name_str}") # Internal log + if persona_name_str in self.personas: + ret_str += str(self.personas[persona_name_str].scratch.curr_tile) + else: + logger.warning(f"Persona '{persona_name_str}' not found for 'print persona current tile'.") # User-facing + ret_str = f"错误:未找到角色 '{persona_name_str}'。" + + elif "print persona chatting with buffer" in sim_command: + logger.info(f"Command: Print Persona Chatting With Buffer for {persona_name_str}") # Internal log + if persona_name_str in self.personas: + curr_persona = self.personas[persona_name_str] + for p_n, count in curr_persona.scratch.chatting_with_buffer.items(): + ret_str += f"{p_n}: {count}\n" + else: + logger.warning(f"Persona '{persona_name_str}' not found for 'print persona chatting with buffer'.") # User-facing + ret_str = f"错误:未找到角色 '{persona_name_str}'。" + + elif "print persona associative memory (event)" in sim_command: + logger.info(f"Command: Print Persona Associative Memory (Event) for {persona_name_str}") # Internal log + if persona_name_str in self.personas: + ret_str += f'{self.personas[persona_name_str].name}\n' + ret_str += (self.personas[persona_name_str].a_mem.get_str_seq_events()) + else: + logger.warning(f"Persona '{persona_name_str}' not found for 'print persona associative memory (event)'.") # User-facing + ret_str = f"错误:未找到角色 '{persona_name_str}'。" + + elif "print persona associative memory (thought)" in sim_command: + logger.info(f"Command: Print Persona Associative Memory (Thought) for {persona_name_str}") # Internal log + if persona_name_str in self.personas: + ret_str += f'{self.personas[persona_name_str].name}\n' + ret_str += (self.personas[persona_name_str].a_mem.get_str_seq_thoughts()) + else: + logger.warning(f"Persona '{persona_name_str}' not found for 'print persona associative memory (thought)'.") # User-facing + ret_str = f"错误:未找到角色 '{persona_name_str}'。" + + elif "print persona associative memory (chat)" in sim_command: + logger.info(f"Command: Print Persona Associative Memory (Chat) for {persona_name_str}") # Internal log + if persona_name_str in self.personas: + ret_str += f'{self.personas[persona_name_str].name}\n' + ret_str += (self.personas[persona_name_str].a_mem.get_str_seq_chats()) + else: + logger.warning(f"Persona '{persona_name_str}' not found for 'print persona associative memory (chat)'.") # User-facing + ret_str = f"错误:未找到角色 '{persona_name_str}'。" + + elif "print persona spatial memory" in sim_command: + logger.info(f"Command: Print Persona Spatial Memory for {persona_name_str}") # Internal log + if persona_name_str in self.personas: + self.personas[persona_name_str].s_mem.print_tree() + ret_str = f"{persona_name_str} 的空间记忆已打印在上方。" + else: + logger.warning(f"Persona '{persona_name_str}' not found for 'print persona spatial memory'.") # User-facing + ret_str = f"错误:未找到角色 '{persona_name_str}'。" + return ret_str + + def _handle_world_info_commands(self, sim_command): + """Handles commands for printing world/environment information.""" + ret_str = "" + if "print current time" in sim_command: + logger.info("Command: Print Current Time") # Internal log + ret_str += f'{self.curr_time.strftime("%B %d, %Y, %H:%M:%S")}\n' + ret_str += f'步骤: {self.step}' + + elif "print tile event" in sim_command: + coords_str = sim_command[len("print tile event"):].strip() + logger.info(f"Command: Print Tile Event for coordinates: {coords_str}") # Internal log + try: + cooordinate = [int(i.strip()) for i in coords_str.split(",")] + if len(cooordinate) != 2: raise ValueError("Invalid coordinate format") + for i in self.maze.access_tile(cooordinate)["events"]: + ret_str += f"{i}\n" + except ValueError: + logger.error(f"Error: Invalid coordinates for 'print tile event'. Expected format: X, Y. Received: {coords_str}", exc_info=True) # User-facing + ret_str = f"错误:坐标无效。预期格式:X, Y。收到:{coords_str}" + except IndexError: + logger.error(f"Error: Coordinates not specified or incomplete for 'print tile event'. Received: {coords_str}", exc_info=True) # User-facing + ret_str = f"错误:未指定坐标或坐标不完整。收到:{coords_str}" + + elif "print tile details" in sim_command: + coords_str = sim_command[len("print tile details"):].strip() + logger.info(f"Command: Print Tile Details for coordinates: {coords_str}") # Internal log + try: + cooordinate = [int(i.strip()) for i in coords_str.split(",")] + if len(cooordinate) != 2: raise ValueError("Invalid coordinate format") + for key, val in self.maze.access_tile(cooordinate).items(): + ret_str += f"{key}: {val}\n" + except ValueError: + logger.error(f"Error: Invalid coordinates for 'print tile details'. Expected format: X, Y. Received: {coords_str}", exc_info=True) # User-facing + ret_str = f"错误:坐标无效。预期格式:X, Y。收到:{coords_str}" + except IndexError: + logger.error(f"Error: Coordinates not specified or incomplete for 'print tile details'. Received: {coords_str}", exc_info=True) # User-facing + ret_str = f"错误:未指定坐标或坐标不完整。收到:{coords_str}" + return ret_str + + def _handle_call_commands(self, sim_command): + """Handles 'call' commands.""" + ret_str = "" + if "call -- analysis" in sim_command: + persona_name_str = sim_command[len("call -- analysis"):].strip() + logger.info(f"Command: Call -- Analysis for {persona_name_str}") # Internal log + if persona_name_str in self.personas: + self.personas[persona_name_str].open_convo_session("analysis") + ret_str = f"已为 {persona_name_str} 打开对话会话 'analysis'。" + else: + logger.warning(f"Persona '{persona_name_str}' not found for 'call -- analysis'.") # User-facing + ret_str = f"错误:未找到角色 '{persona_name_str}'。" + + elif "call -- load history" in sim_command: + file_path_str = sim_command[len("call -- load history"):].strip() + logger.info(f"Command: Call -- Load History from file: {file_path_str}") # Internal log + curr_file = maze_assets_loc + "/" + file_path_str + try: + rows = read_file_to_list(curr_file, header=True, strip_trail=True)[1] + clean_whispers = [] + for row_idx, row in enumerate(rows): + if not row: + logger.debug(f"Skipping empty row {row_idx+1} in {curr_file}") # Internal log + continue + if len(row) < 2: + logger.warning(f"Skipping malformed row {row_idx+1} (length < 2) in {curr_file}: {row}") # Internal log + continue + agent_name = row[0].strip() + whispers_str = row[1] + whispers = whispers_str.split(";") if whispers_str else [] + whispers = [whisper.strip() for whisper in whispers if whisper.strip()] + if not agent_name: + logger.warning(f"Skipping row {row_idx+1} with empty agent name in {curr_file}") # Internal log + continue + if whispers: + for whisper in whispers: + clean_whispers += [[agent_name, whisper]] + logger.debug(f"Parsed {len(whispers)} whispers for agent {agent_name} from row {row_idx+1}") # Internal log + else: + logger.debug(f"No whispers found for agent {agent_name} in row {row_idx+1}") # Internal log + if clean_whispers: + load_history_via_whisper(self.personas, clean_whispers) + ret_str = f"历史记录已从 {curr_file} 加载。" + logger.info(f"Successfully loaded history from {curr_file}, {len(clean_whispers)} whispers processed.") # Internal log + else: + ret_str = f"未从 {curr_file} 中找到有效的耳语内容可供加载。" + logger.info(f"No valid whispers found in {curr_file} to load.") # Internal log + except FileNotFoundError: + logger.error(f"Error: History file not found at {curr_file}", exc_info=True) # User-facing via ret_str + ret_str = f"错误:在 {curr_file} 未找到历史记录文件。" + except IOError as e: + logger.error(f"Error: Could not read history file at {curr_file}. IO Error: {e}", exc_info=True) # User-facing via ret_str + ret_str = f"错误:无法读取位于 {curr_file} 的历史记录文件。" + except IndexError as e: + logger.error(f"Error: Malformed history file or data at {curr_file}. Error: {e}", exc_info=True) # User-facing via ret_str + ret_str = f"错误:位于 {curr_file} 的历史记录文件或数据格式错误。" + except Exception as e: + logger.error(f"An unexpected error occurred while loading history from {curr_file}: {e}", exc_info=True) # User-facing via ret_str + ret_str = f"从 {curr_file} 加载历史记录时发生意外错误。" + return ret_str + + def _handle_path_tester_command(self, sim_command, sim_folder): + """Handles the 'start path tester mode' command.""" + should_break = False + if sim_command.lower() == "start path tester mode": + logger.info("Command: Start Path Tester Mode.") # Internal log + try: + logger.info(f"Removing simulation folder: {sim_folder}") # Internal log + shutil.rmtree(sim_folder) + logger.info(f"Successfully removed {sim_folder}") # Internal log + except OSError as e: + logger.error(f"Error: Could not remove simulation folder {sim_folder}. OS Error: {e}", exc_info=True) # Internal log + self.start_path_tester_server() + logger.info("Exiting after path tester mode.") # Internal log + should_break = True # Path tester mode is blocking and requires restart + return "", should_break + # COMMAND HANDLER METHODS END HERE def open_server(self): """ @@ -422,194 +814,101 @@ def open_server(self): OUTPUT None """ - print ("Note: The agents in this simulation package are computational") - print ("constructs powered by generative agents architecture and LLM. We") - print ("clarify that these agents lack human-like agency, consciousness,") - print ("and independent decision-making.\n---") + logger.info("--- 启动 Reverie 交互式服务器 ---") + logger.info("注意:此模拟包中的代理是计算结构,") + logger.info("由生成式代理架构和大型语言模型驱动。我们") + logger.info("澄清这些代理不具备类人的能动性、意识,") + logger.info("以及独立的决策能力。\n---") - # points to the current simulation folder. sim_folder = f"{fs_storage}/{self.sim_code}" while True: - sim_command = input("Enter option: ") - sim_command = sim_command.strip() + sim_command = input("请输入选项: ") + sim_command = sim_command.strip().lower() # Normalize command + logger.info(f"Received command: {sim_command}") # Internal log ret_str = "" + should_break = False try: - if sim_command.lower() in ["f", "fin", "finish", "save and finish"]: - # Finishes the simulation environment and saves the progress. - # Example: fin - self.save() + if sim_command in ["f", "fin", "finish", "save and finish", "exit", "save"] or \ + sim_command.startswith("run"): + ret_str, should_break = self._handle_lifecycle_commands(sim_command, sim_folder) + + elif sim_command == "start path tester mode": + ret_str, should_break = self._handle_path_tester_command(sim_command, sim_folder) + if should_break: # Path tester mode implies exit after it's done or if it's blocking + break + + elif sim_command.startswith("print persona"): + ret_str = self._handle_persona_info_commands(sim_command) + + elif sim_command.startswith("print all persona schedule"): # Specific case before generic "print persona" + ret_str = self._handle_persona_info_commands(sim_command) + + elif sim_command.startswith("print current time") or \ + sim_command.startswith("print tile event") or \ + sim_command.startswith("print tile details"): + ret_str = self._handle_world_info_commands(sim_command) + + elif sim_command.startswith("call --"): + ret_str = self._handle_call_commands(sim_command) + + else: + if sim_command: # Avoid logging for empty input + logger.warning(f"Unknown command: {sim_command}") # User-facing via ret_str + ret_str = f"未知命令: {sim_command}" + + if ret_str: + print(ret_str) + + if should_break: break - elif sim_command.lower() == "start path tester mode": - # Starts the path tester and removes the currently forked sim files. - # Note that once you start this mode, you need to exit out of the - # session and restart in case you want to run something else. - shutil.rmtree(sim_folder) - self.start_path_tester_server() - - elif sim_command.lower() == "exit": - # Finishes the simulation environment but does not save the progress - # and erases all saved data from current simulation. - # Example: exit - shutil.rmtree(sim_folder) - break - - elif sim_command.lower() == "save": - # Saves the current simulation progress. - # Example: save - self.save() - - elif sim_command[:3].lower() == "run": - # Runs the number of steps specified in the prompt. - # Example: run 1000 - int_count = int(sim_command.split()[-1]) - rs.start_server(int_count) - - elif ("print persona schedule" - in sim_command[:22].lower()): - # Print the decomposed schedule of the persona specified in the - # prompt. - # Example: print persona schedule Isabella Rodriguez - ret_str += (self.personas[" ".join(sim_command.split()[-2:])] - .scratch.get_str_daily_schedule_summary()) - - elif ("print all persona schedule" - in sim_command[:26].lower()): - # Print the decomposed schedule of all personas in the world. - # Example: print all persona schedule - for persona_name, persona in self.personas.items(): - ret_str += f"{persona_name}\n" - ret_str += f"{persona.scratch.get_str_daily_schedule_summary()}\n" - ret_str += f"---\n" - - elif ("print hourly org persona schedule" - in sim_command.lower()): - # Print the hourly schedule of the persona specified in the prompt. - # This one shows the original, non-decomposed version of the - # schedule. - # Ex: print persona schedule Isabella Rodriguez - ret_str += (self.personas[" ".join(sim_command.split()[-2:])] - .scratch.get_str_daily_schedule_hourly_org_summary()) - - elif ("print persona current tile" - in sim_command[:26].lower()): - # Print the x y tile coordinate of the persona specified in the - # prompt. - # Ex: print persona current tile Isabella Rodriguez - ret_str += str(self.personas[" ".join(sim_command.split()[-2:])] - .scratch.curr_tile) - - elif ("print persona chatting with buffer" - in sim_command.lower()): - # Print the chatting with buffer of the persona specified in the - # prompt. - # Ex: print persona chatting with buffer Isabella Rodriguez - curr_persona = self.personas[" ".join(sim_command.split()[-2:])] - for p_n, count in curr_persona.scratch.chatting_with_buffer.items(): - ret_str += f"{p_n}: {count}" - - elif ("print persona associative memory (event)" - in sim_command.lower()): - # Print the associative memory (event) of the persona specified in - # the prompt - # Ex: print persona associative memory (event) Isabella Rodriguez - ret_str += f'{self.personas[" ".join(sim_command.split()[-2:])]}\n' - ret_str += (self.personas[" ".join(sim_command.split()[-2:])] - .a_mem.get_str_seq_events()) - - elif ("print persona associative memory (thought)" - in sim_command.lower()): - # Print the associative memory (thought) of the persona specified in - # the prompt - # Ex: print persona associative memory (thought) Isabella Rodriguez - ret_str += f'{self.personas[" ".join(sim_command.split()[-2:])]}\n' - ret_str += (self.personas[" ".join(sim_command.split()[-2:])] - .a_mem.get_str_seq_thoughts()) - - elif ("print persona associative memory (chat)" - in sim_command.lower()): - # Print the associative memory (chat) of the persona specified in - # the prompt - # Ex: print persona associative memory (chat) Isabella Rodriguez - ret_str += f'{self.personas[" ".join(sim_command.split()[-2:])]}\n' - ret_str += (self.personas[" ".join(sim_command.split()[-2:])] - .a_mem.get_str_seq_chats()) - - elif ("print persona spatial memory" - in sim_command.lower()): - # Print the spatial memory of the persona specified in the prompt - # Ex: print persona spatial memory Isabella Rodriguez - self.personas[" ".join(sim_command.split()[-2:])].s_mem.print_tree() - - elif ("print current time" - in sim_command[:18].lower()): - # Print the current time of the world. - # Ex: print current time - ret_str += f'{self.curr_time.strftime("%B %d, %Y, %H:%M:%S")}\n' - ret_str += f'steps: {self.step}' - - elif ("print tile event" - in sim_command[:16].lower()): - # Print the tile events in the tile specified in the prompt - # Ex: print tile event 50, 30 - cooordinate = [int(i.strip()) for i in sim_command[16:].split(",")] - for i in self.maze.access_tile(cooordinate)["events"]: - ret_str += f"{i}\n" - - elif ("print tile details" - in sim_command.lower()): - # Print the tile details of the tile specified in the prompt - # Ex: print tile event 50, 30 - cooordinate = [int(i.strip()) for i in sim_command[18:].split(",")] - for key, val in self.maze.access_tile(cooordinate).items(): - ret_str += f"{key}: {val}\n" - - elif ("call -- analysis" - in sim_command.lower()): - # Starts a stateless chat session with the agent. It does not save - # anything to the agent's memory. - # Ex: call -- analysis Isabella Rodriguez - persona_name = sim_command[len("call -- analysis"):].strip() - self.personas[persona_name].open_convo_session("analysis") - - elif ("call -- load history" - in sim_command.lower()): - curr_file = maze_assets_loc + "/" + sim_command[len("call -- load history"):].strip() - # call -- load history the_ville/agent_history_init_n3.csv - - rows = read_file_to_list(curr_file, header=True, strip_trail=True)[1] - clean_whispers = [] - for row in rows: - agent_name = row[0].strip() - whispers = row[1].split(";") - whispers = [whisper.strip() for whisper in whispers] - for whisper in whispers: - clean_whispers += [[agent_name, whisper]] - - load_history_via_whisper(self.personas, clean_whispers) - - print (ret_str) - - except: - traceback.print_exc() - print ("Error.") + except KeyError as e: + logger.error(f"Error: Persona name not found: {e}. Please check the persona name and try again.", exc_info=True) # User-facing via print + print(f"错误:未找到角色名:{e}。请检查角色名后重试。") + except ValueError as e: + logger.error(f"Error: Invalid value in command. {e}", exc_info=True) # User-facing via print + print(f"错误:命令中包含无效值。{e}") + except FileNotFoundError as e: + logger.error(f"Error: File not found. {e}", exc_info=True) # User-facing via print + print(f"错误:文件未找到。{e}") + except IOError as e: + logger.error(f"Error: Input/Output error. {e}", exc_info=True) # User-facing via print + print(f"错误:输入/输出错误。{e}") + except OSError as e: + logger.error(f"Error: Operating system error. {e}", exc_info=True) # User-facing via print + print(f"错误:操作系统错误。{e}") + except Exception as e: + logger.error(f"An unexpected error occurred in open_server command processing: {sim_command}. Error: {e}", exc_info=True) # User-facing via print + print (f"发生意外错误。请查看日志了解详情。命令: {sim_command}") pass if __name__ == '__main__': + # Configure logger for the main execution as well if needed, or rely on module-level config + # Basic config for when the script is run directly, to ensure logs are seen if not imported + if not logger.handlers: # Avoid adding handlers multiple times if already configured + main_handler = logging.StreamHandler() + main_formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") + main_handler.setFormatter(main_formatter) + logging.basicConfig(handlers=[main_handler], level=logging.INFO) + # rs = ReverieServer("base_the_ville_isabella_maria_klaus", # "July1_the_ville_isabella_maria_klaus-step-3-1") # rs = ReverieServer("July1_the_ville_isabella_maria_klaus-step-3-20", # "July1_the_ville_isabella_maria_klaus-step-3-21") # rs.open_server() - - origin = input("Enter the name of the forked simulation: ").strip() - target = input("Enter the name of the new simulation: ").strip() - - rs = ReverieServer(origin, target) - rs.open_server() + try: + origin = input("请输入源模拟的名称: ").strip() + target = input("请输入新模拟的名称: ").strip() + + logger.info(f"Initializing ReverieServer with origin: {origin}, target: {target}") # Internal log + rs = ReverieServer(origin, target) # rs is the instance of ReverieServer + rs.open_server() + except Exception as e: + logger.critical(f"A critical error occurred at the main execution level: {e}", exc_info=True) # User-facing via print + print(f"发生严重错误: {e}。请查看日志获取更多详情。") diff --git a/reverie/backend_server/test_reverie.py b/reverie/backend_server/test_reverie.py new file mode 100644 index 0000000000..61c2038cdc --- /dev/null +++ b/reverie/backend_server/test_reverie.py @@ -0,0 +1,519 @@ +import unittest +from unittest.mock import patch, mock_open, MagicMock, call +import os +import sys +import json +import datetime + +# Add the parent directory to sys.path to allow imports from reverie.backend_server +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))) + +# Attempt to pre-patch the problematic global variable at the earliest point. +# This is to address the NameError for 'openai_api_key' in gpt_structure.py during import. +patched_openai_api_key = False +try: + # Ensure the module path exists for import + import reverie.backend_server.persona.prompt_template.gpt_structure as gpt_module + gpt_module.openai_api_key = "sk-dummy-test-key-early-set" + # print("DEBUG: Successfully set openai_api_key in gpt_structure module.") + patched_openai_api_key = True +except Exception as e: + # print(f"DEBUG: Error setting openai_api_key directly: {e}") + # Fallback to trying a patch if direct set fails or module not found yet. + # This patch will only work if the module is loaded *after* this line. + # The NameError happens *inside* gpt_structure.py, so the module must exist for patch to target it. + pass + +if not patched_openai_api_key: + # If direct set failed, try to use patch. This is less likely to solve an import-time NameError + # for the variable itself, but it's here as a fallback attempt. + # The core issue is when gpt_structure.py is first executed. + # print("DEBUG: Attempting patch for openai_api_key as direct set might have failed.") + gpt_structure_patcher = patch('reverie.backend_server.persona.prompt_template.gpt_structure.openai_api_key', 'sk-dummy-via-patch', create=True) + try: + gpt_structure_patcher.start() + # print("DEBUG: Started patch for openai_api_key.") + patched_openai_api_key = True # Record that patch was attempted + except Exception as e: + # print(f"DEBUG: Failed to start patch for openai_api_key: {e}") + pass + + +# Continue with other imports +import unittest +from unittest.mock import patch, mock_open, MagicMock, call # unittest.mock.patch is already imported by name 'patch' +import json # Already imported in ReverieServer, but good practice for clarity if used here +import datetime # Already imported in ReverieServer + +# Now import the class to be tested +from reverie.backend_server.reverie import ReverieServer + +# Mock global variables that ReverieServer expects +# These would normally be imported from global_methods or utils +# For testing, we define them or mock them as needed. +global_fs_storage = "mock_storage" +global_fs_temp_storage = "mock_temp_storage" +global_maze_assets_loc = "mock_maze_assets" + +class TestReverieServer(unittest.TestCase): + + def setUp(self): + """ + Set up for test methods. This method is called before each test method. + It involves mocking all external dependencies of ReverieServer.__init__ + """ + self.fork_sim_code = "test_fork_sim" + self.sim_code = "test_sim" + + # Mock meta.json data + self.mock_reverie_meta = { + "fork_sim_code": self.fork_sim_code, + "start_date": "January 1, 2023", + "curr_time": "January 1, 2023, 00:00:00", + "sec_per_step": 60, + "maze_name": "test_maze", + "persona_names": ["Alice", "Bob"], + "step": 0 + } + + # Mock initial environment file data + self.mock_init_env = { + "Alice": {"x": 10, "y": 20}, + "Bob": {"x": 15, "y": 25} + } + + # Patch 'copyanything' + self.patch_copyanything = patch('reverie.backend_server.reverie.copyanything') + self.mock_copyanything = self.patch_copyanything.start() + + # Patch 'open' for meta.json and environment.json + # Need to handle multiple open calls with different behaviors + def mock_open_side_effect(filepath, *args, **kwargs): + if f"{global_fs_storage}/{self.sim_code}/reverie/meta.json" in filepath and args[0] == 'r': + return mock_open(read_data=json.dumps(self.mock_reverie_meta))() + elif f"{global_fs_storage}/{self.sim_code}/reverie/meta.json" in filepath and args[0] == 'w': + return mock_open()() # For writing meta + elif f"{global_fs_storage}/{self.sim_code}/environment/{self.mock_reverie_meta['step']}.json" in filepath: + return mock_open(read_data=json.dumps(self.mock_init_env))() + elif f"{global_fs_temp_storage}/curr_sim_code.json" in filepath and args[0] == 'w': + return mock_open()() + elif f"{global_fs_temp_storage}/curr_step.json" in filepath and args[0] == 'w': + return mock_open()() + # Fallback for other open calls if any + return mock_open()() + + self.patch_open = patch('builtins.open', side_effect=mock_open_side_effect) + self.mock_builtin_open = self.patch_open.start() + + # Patch json.load and json.dump if necessary, though mock_open handles read_data for load + self.patch_json_dump = patch('json.dump') + self.mock_json_dump = self.patch_json_dump.start() + + # Patch Maze class + self.patch_maze = patch('reverie.backend_server.reverie.Maze') + self.MockMazeClass = self.patch_maze.start() + self.mock_maze_instance = self.MockMazeClass.return_value + self.mock_maze_instance.access_tile.return_value = {"events": []} # Default + self.mock_maze_instance.tiles = [[{"events": set()} for _ in range(50)] for _ in range(50)] # Mock a basic tile structure + + # Patch Persona class + self.patch_persona = patch('reverie.backend_server.reverie.Persona') + self.MockPersonaClass = self.patch_persona.start() + self.mock_alice_persona = MagicMock() + self.mock_alice_persona.name = "Alice" + self.mock_alice_persona.scratch.get_curr_event_and_desc.return_value = ("Alice_event", "Alice_desc") + self.mock_bob_persona = MagicMock() + self.mock_bob_persona.name = "Bob" + self.mock_bob_persona.scratch.get_curr_event_and_desc.return_value = ("Bob_event", "Bob_desc") + + # Side effect for Persona instantiation + def persona_side_effect(name, folder): + if name == "Alice": + return self.mock_alice_persona + elif name == "Bob": + return self.mock_bob_persona + return MagicMock() + self.MockPersonaClass.side_effect = persona_side_effect + + # Patch logger + self.patch_logger = patch('reverie.backend_server.reverie.logger') + self.mock_logger = self.patch_logger.start() + + # Patch shutil.rmtree + self.patch_shutil_rmtree = patch('shutil.rmtree') + self.mock_shutil_rmtree = self.patch_shutil_rmtree.start() + + # Patch global storage variables as they are used to construct paths + self.patch_fs_storage = patch('reverie.backend_server.reverie.fs_storage', global_fs_storage) + self.mock_fs_storage = self.patch_fs_storage.start() + + self.patch_fs_temp_storage = patch('reverie.backend_server.reverie.fs_temp_storage', global_fs_temp_storage) + self.mock_fs_temp_storage = self.patch_fs_temp_storage.start() + + self.patch_maze_assets_loc = patch('reverie.backend_server.reverie.maze_assets_loc', global_maze_assets_loc) + self.mock_maze_assets_loc = self.patch_maze_assets_loc.start() + + # Patch datetime.datetime.strptime + self.patch_strptime = patch('datetime.datetime') + self.mock_datetime = self.patch_strptime.start() + self.mock_datetime.strptime.return_value = datetime.datetime(2023, 1, 1, 0, 0, 0) + self.mock_datetime.now.return_value = datetime.datetime(2023, 1, 1, 0, 0, 0) # If needed + self.mock_datetime.timedelta = datetime.timedelta # Use real timedelta + + # Patch time.sleep + self.patch_time_sleep = patch('time.sleep', return_value=None) + self.mock_time_sleep = self.patch_time_sleep.start() + + # Patch read_file_to_list (assuming it's in utils or global_methods) + # This needs to point to the correct module where read_file_to_list is defined. + # Assuming it's in 'reverie.backend_server.reverie' for now if it's a global method there + # Or 'reverie.backend_server.utils.read_file_to_list' if it's in utils.py + # For now, let's assume it's available in reverie.py's scope + self.patch_read_file = patch('reverie.backend_server.reverie.read_file_to_list') + self.mock_read_file_to_list = self.patch_read_file.start() + + # Patch load_history_via_whisper + self.patch_load_history = patch('reverie.backend_server.reverie.load_history_via_whisper') + self.mock_load_history_via_whisper = self.patch_load_history.start() + + # Patch check_if_file_exists, assuming it's available in reverie.py's scope + # (e.g., imported from utils or global_methods) + self.patch_check_file = patch('reverie.backend_server.reverie.check_if_file_exists') + self.mock_check_if_file_exists = self.patch_check_file.start() + # Default to file not existing, tests can change this per case + self.mock_check_if_file_exists.return_value = False + + # Patch openai_api_key in gpt_structure.py to prevent NameError during import chain + # This needs to be active *before* ReverieServer and its imports are fully processed. + # One way is to patch it in the module where it's accessed. + self.patch_openai_api_key = patch('reverie.backend_server.persona.prompt_template.gpt_structure.openai_api_key', 'sk-testkey') + # Start it early, before ReverieServer is potentially re-imported or its modules fully loaded by tests. + # However, setUp runs *after* the test module (and ReverieServer) is imported. + # This patch is tricky due to import-time error. + # A better place might be at the class level or module level of the test file. + # For now, let's try starting it here. If it fails, will move to class level. + try: + self.mock_openai_api_key = self.patch_openai_api_key.start() + except AttributeError: # Handle if already started or module not loaded yet in a way patch expects + # This can happen if tests are run in a way that gpt_structure isn't fully on sys.modules yet for patching + # For now, we'll assume this will work or adjust. + pass + + + # Instantiate the server + # We need to ensure that all global variables like fs_storage are patched *before* this. + self.server = ReverieServer(self.fork_sim_code, self.sim_code) + + # Clear mock calls from setup to have clean slate for each test + self.mock_logger.reset_mock() + self.mock_copyanything.reset_mock() + self.mock_builtin_open.reset_mock() + self.MockMazeClass.reset_mock() + self.MockPersonaClass.reset_mock() + self.mock_shutil_rmtree.reset_mock() + self.mock_json_dump.reset_mock() + self.mock_load_history_via_whisper.reset_mock() + self.mock_read_file_to_list.reset_mock() + self.mock_check_if_file_exists.reset_mock() + + + def tearDown(self): + """ + Clean up after test methods. This method is called after each test method. + """ + self.patch_copyanything.stop() + self.patch_open.stop() + self.patch_json_dump.stop() + self.patch_maze.stop() + self.patch_persona.stop() + self.patch_logger.stop() + self.patch_shutil_rmtree.stop() + self.patch_fs_storage.stop() + self.patch_fs_temp_storage.stop() + self.patch_maze_assets_loc.stop() + self.patch_strptime.stop() + self.patch_time_sleep.stop() + self.patch_read_file.stop() + self.patch_load_history.stop() + self.patch_check_file.stop() + # Stop the openai_api_key patch if it was started + if hasattr(self, 'mock_openai_api_key') and self.patch_openai_api_key.is_started: + self.patch_openai_api_key.stop() + + # --- Test _handle_lifecycle_commands --- + def test_handle_lifecycle_fin_command(self): + self.server.save = MagicMock() + ret_str, should_break = self.server._handle_lifecycle_commands("fin", self.server.sim_code) + self.server.save.assert_called_once() + self.mock_logger.info.assert_any_call("Simulation saved. Exiting.") + self.assertEqual(ret_str, "") + self.assertTrue(should_break) + + def test_handle_lifecycle_exit_command(self): + ret_str, should_break = self.server._handle_lifecycle_commands("exit", f"{global_fs_storage}/{self.server.sim_code}") + self.mock_shutil_rmtree.assert_called_once_with(f"{global_fs_storage}/{self.server.sim_code}") + self.mock_logger.info.assert_any_call("Exiting.") + self.assertEqual(ret_str, "") + self.assertTrue(should_break) + + def test_handle_lifecycle_save_command(self): + self.server.save = MagicMock() + ret_str, should_break = self.server._handle_lifecycle_commands("save", self.server.sim_code) + self.server.save.assert_called_once() + self.assertEqual(ret_str, "Simulation progress saved.") + self.assertFalse(should_break) + + def test_handle_lifecycle_run_command_valid(self): + self.server.start_server = MagicMock() + ret_str, should_break = self.server._handle_lifecycle_commands("run 100", self.server.sim_code) + self.server.start_server.assert_called_once_with(100) + self.assertEqual(ret_str, "Finished running 100 steps.") + self.assertFalse(should_break) + + def test_handle_lifecycle_run_command_invalid_steps(self): + self.server.start_server = MagicMock() + ret_str, should_break = self.server._handle_lifecycle_commands("run abc", self.server.sim_code) + self.server.start_server.assert_not_called() + self.assertEqual(ret_str, "Error: Invalid number of steps. Please provide an integer.") + self.mock_logger.error.assert_called_with( + "Error: Invalid number of steps. Please provide an integer. Command: 'run abc'", + exc_info=True + ) + self.assertFalse(should_break) + + def test_handle_lifecycle_run_command_missing_steps(self): + self.server.start_server = MagicMock() + ret_str, should_break = self.server._handle_lifecycle_commands("run", self.server.sim_code) + self.server.start_server.assert_not_called() + self.assertEqual(ret_str, "Error: Number of steps not specified.") + self.mock_logger.error.assert_called_with( + "Error: Number of steps not specified. Command: 'run'" + ) # exc_info might not be True here based on current code in handler + self.assertFalse(should_break) + + # --- Test _handle_persona_info_commands --- + def test_handle_persona_info_schedule_existing_persona(self): + self.mock_alice_persona.scratch.get_str_daily_schedule_summary.return_value = "Alice's schedule" + ret_str = self.server._handle_persona_info_commands("print persona schedule Alice") + self.assertEqual(ret_str, "Alice's schedule") + self.mock_alice_persona.scratch.get_str_daily_schedule_summary.assert_called_once() + + def test_handle_persona_info_schedule_non_existing_persona(self): + ret_str = self.server._handle_persona_info_commands("print persona schedule Charlie") + self.assertEqual(ret_str, "Error: Persona 'Charlie' not found.") + self.mock_logger.warning.assert_called_with("Persona 'Charlie' not found for 'print persona schedule'.") + + def test_handle_persona_info_all_schedules(self): + self.mock_alice_persona.scratch.get_str_daily_schedule_summary.return_value = "Alice's schedule" + self.mock_bob_persona.scratch.get_str_daily_schedule_summary.return_value = "Bob's schedule" + ret_str = self.server._handle_persona_info_commands("print all persona schedule") + self.assertIn("Alice\nAlice's schedule\n---\n", ret_str) + self.assertIn("Bob\nBob's schedule\n---\n", ret_str) + + def test_handle_persona_info_spatial_memory_existing_persona(self): + self.mock_alice_persona.s_mem.print_tree = MagicMock() + ret_str = self.server._handle_persona_info_commands("print persona spatial memory Alice") + self.assertEqual(ret_str, "Spatial memory for Alice printed above.") + self.mock_alice_persona.s_mem.print_tree.assert_called_once() + + def test_handle_persona_info_hourly_org_schedule_existing_persona(self): + self.mock_alice_persona.scratch.get_str_daily_schedule_hourly_org_summary.return_value = "Alice's hourly schedule" + ret_str = self.server._handle_persona_info_commands("print hourly org persona schedule Alice") + self.assertEqual(ret_str, "Alice's hourly schedule") + self.mock_alice_persona.scratch.get_str_daily_schedule_hourly_org_summary.assert_called_once() + + def test_handle_persona_info_current_tile_non_existing_persona(self): + ret_str = self.server._handle_persona_info_commands("print persona current tile Charlie") + self.assertEqual(ret_str, "Error: Persona 'Charlie' not found.") + self.mock_logger.warning.assert_called_with("Persona 'Charlie' not found for 'print persona current tile'.") + + def test_handle_persona_info_chatting_with_buffer_existing_persona(self): + self.mock_bob_persona.scratch.chatting_with_buffer = {"Alice": 5} + ret_str = self.server._handle_persona_info_commands("print persona chatting with buffer Bob") + self.assertEqual(ret_str, "Alice: 5\n") + + def test_handle_persona_info_associative_memory_event_non_existing(self): + ret_str = self.server._handle_persona_info_commands("print persona associative memory (event) Charlie") + self.assertEqual(ret_str, "Error: Persona 'Charlie' not found.") + self.mock_logger.warning.assert_called_with("Persona 'Charlie' not found for 'print persona associative memory (event)'.") + + def test_handle_persona_info_associative_memory_thought_existing(self): + self.mock_alice_persona.a_mem.get_str_seq_thoughts.return_value = "Alice's thoughts" + ret_str = self.server._handle_persona_info_commands("print persona associative memory (thought) Alice") + self.assertEqual(ret_str, "Alice\nAlice's thoughts") # Assumes self.mock_alice_persona.name is "Alice" + self.mock_alice_persona.a_mem.get_str_seq_thoughts.assert_called_once() + + def test_handle_persona_info_associative_memory_chat_existing(self): + self.mock_bob_persona.a_mem.get_str_seq_chats.return_value = "Bob's chats" + ret_str = self.server._handle_persona_info_commands("print persona associative memory (chat) Bob") + self.assertEqual(ret_str, "Bob\nBob's chats") # Assumes self.mock_bob_persona.name is "Bob" + self.mock_bob_persona.a_mem.get_str_seq_chats.assert_called_once() + + + # --- Test _handle_world_info_commands --- + def test_handle_world_info_current_time(self): + # server.curr_time is datetime(2023,1,1) and server.step is 0 from setup + expected_time_str = datetime.datetime(2023, 1, 1, 0, 0, 0).strftime("%B %d, %Y, %H:%M:%S") + expected_ret_str = f"{expected_time_str}\nsteps: 0" + ret_str = self.server._handle_world_info_commands("print current time") + self.assertEqual(ret_str, expected_ret_str) + + def test_handle_world_info_tile_event_valid_coords(self): + self.server.maze.access_tile.return_value = {"events": ["event1", "event2"]} + ret_str = self.server._handle_world_info_commands("print tile event 10, 20") + self.server.maze.access_tile.assert_called_once_with([10, 20]) + self.assertEqual(ret_str, "event1\nevent2\n") + + def test_handle_world_info_tile_event_invalid_coords_format(self): + ret_str = self.server._handle_world_info_commands("print tile event 10_20") # Normalized to lowercase in open_server + self.assertEqual(ret_str, "Error: Invalid coordinates. Expected format: X, Y. Received: 10_20") + self.mock_logger.error.assert_called_with( + "Error: Invalid coordinates for 'print tile event'. Expected format: X, Y. Received: 10_20", + exc_info=True + ) + + def test_handle_world_info_tile_event_incomplete_coords(self): + ret_str = self.server._handle_world_info_commands("print tile event 10") # Normalized + self.assertEqual(ret_str, "Error: Invalid coordinates. Expected format: X, Y. Received: 10") + self.mock_logger.error.assert_called_with( + "Error: Invalid coordinates for 'print tile event'. Expected format: X, Y. Received: 10", + exc_info=True + ) + + def test_handle_world_info_tile_details_valid_coords(self): + self.server.maze.access_tile.return_value = {"name": "TileA", "state": "active"} + ret_str = self.server._handle_world_info_commands("print tile details 30, 40") + self.server.maze.access_tile.assert_called_once_with([30, 40]) + self.assertIn("name: TileA\n", ret_str) + self.assertIn("state: active\n", ret_str) + + def test_handle_world_info_tile_details_invalid_coords(self): + ret_str = self.server._handle_world_info_commands("print tile details invalid") + self.assertEqual(ret_str, "Error: Invalid coordinates. Expected format: X, Y. Received: invalid") + self.mock_logger.error.assert_called_with( + "Error: Invalid coordinates for 'print tile details'. Expected format: X, Y. Received: invalid", + exc_info=True + ) + + # --- Test _handle_call_commands --- + def test_handle_call_analysis_existing_persona(self): + self.mock_alice_persona.open_convo_session = MagicMock() + ret_str = self.server._handle_call_commands("call -- analysis Alice") + self.mock_alice_persona.open_convo_session.assert_called_once_with("analysis") + self.assertEqual(ret_str, "Conversation session 'analysis' opened for Alice.") + + def test_handle_call_analysis_non_existing_persona(self): + ret_str = self.server._handle_call_commands("call -- analysis Charlie") # Normalized + self.assertEqual(ret_str, "Error: Persona 'Charlie' not found.") + self.mock_logger.warning.assert_called_with("Persona 'Charlie' not found for 'call -- analysis'.") + + def test_handle_call_load_history_valid_file(self): + self.mock_read_file_to_list.return_value = ( + ["header1", "header2"], # Mock header row + [["Alice", "whisper1;whisper2"], ["Bob", "whisper3"]] # Mock data rows + ) + ret_str = self.server._handle_call_commands("call -- load history test_history.csv") # Normalized + + expected_clean_whispers = [ + ["Alice", "whisper1"], + ["Alice", "whisper2"], + ["Bob", "whisper3"] + ] + self.mock_load_history_via_whisper.assert_called_once_with(self.server.personas, expected_clean_whispers) + self.assertEqual(ret_str, f"History loaded from {global_maze_assets_loc}/test_history.csv.") + self.mock_logger.info.assert_any_call(f"Successfully loaded history from {global_maze_assets_loc}/test_history.csv, 3 whispers processed.") + + def test_handle_call_load_history_file_not_found(self): + self.mock_read_file_to_list.side_effect = FileNotFoundError("File not found") + ret_str = self.server._handle_call_commands("call -- load history non_existent.csv") # Normalized + self.assertEqual(ret_str, f"Error: History file not found at {global_maze_assets_loc}/non_existent.csv") + self.mock_logger.error.assert_called_with( + f"Error: History file not found at {global_maze_assets_loc}/non_existent.csv", + exc_info=True + ) + + def test_handle_call_load_history_io_error(self): + self.mock_read_file_to_list.side_effect = IOError("Cannot read file") + ret_str = self.server._handle_call_commands("call -- load history bad_perms.csv") # Normalized + self.assertEqual(ret_str, f"Error: Could not read history file at {global_maze_assets_loc}/bad_perms.csv.") + self.mock_logger.error.assert_called_with( + f"Error: Could not read history file at {global_maze_assets_loc}/bad_perms.csv. IO Error: Cannot read file", + exc_info=True + ) + + def test_handle_call_load_history_malformed_row_index_error(self): + # Simulate a row that's too short, causing IndexError when accessing row[1] + self.mock_read_file_to_list.return_value = ( + ["header"], + [["Alice"]] # Malformed row, missing whisper string + ) + ret_str = self.server._handle_call_commands("call -- load history malformed.csv") # Normalized + self.assertEqual(ret_str, f"Error: Malformed history file or data at {global_maze_assets_loc}/malformed.csv.") + self.mock_logger.error.assert_any_call( # Check if any error log matches this, as exact error details might vary + f"Error: Malformed history file or data at {global_maze_assets_loc}/malformed.csv.", + exc_info=True + ) + + + # --- Test _handle_path_tester_command --- + def test_handle_path_tester_command(self): + self.server.start_path_tester_server = MagicMock() + sim_folder_path = f"{global_fs_storage}/{self.sim_code}" + + ret_str, should_break = self.server._handle_path_tester_command("start path tester mode", sim_folder_path) + + self.mock_shutil_rmtree.assert_called_once_with(sim_folder_path) + self.server.start_path_tester_server.assert_called_once() + self.assertTrue(should_break) + self.assertEqual(ret_str, "") # Path tester command doesn't return a string to print + self.mock_logger.info.assert_any_call("Exiting after path tester mode.") + + # --- Test open_server main loop dispatch --- + @patch('builtins.input', side_effect=["save", "fin"]) # Simulate user inputs + def test_open_server_dispatch_save_and_fin(self, mock_input): + # Mock handler methods to check if they are called + self.server._handle_lifecycle_commands = MagicMock(side_effect=[ + ("Simulation progress saved.", False), # for "save" + ("", True) # for "fin" + ]) + + # No need to catch SystemExit if loop breaks normally + self.server.open_server() + + self.server._handle_lifecycle_commands.assert_any_call("save", f"{global_fs_storage}/{self.sim_code}") + self.server._handle_lifecycle_commands.assert_any_call("fin", f"{global_fs_storage}/{self.sim_code}") + self.assertEqual(self.server._handle_lifecycle_commands.call_count, 2) + # Check that print was called with the ret_str from "save" + # This requires patching 'print' or capturing stdout, which can be complex. + # For now, we trust ret_str is handled by open_server's print call. + + @patch('builtins.input', side_effect=["print persona schedule Alice", "unknown command", "f"]) + def test_open_server_dispatch_persona_unknown_fin(self, mock_input): + self.server._handle_persona_info_commands = MagicMock(return_value="Alice's schedule") + self.server._handle_lifecycle_commands = MagicMock(return_value=("", True)) # for "f" (fin) + + self.server.open_server() + + self.server._handle_persona_info_commands.assert_called_once_with("print persona schedule alice") # Normalized + self.mock_logger.warning.assert_called_with("Unknown command: unknown command") + self.server._handle_lifecycle_commands.assert_called_once_with("f", f"{global_fs_storage}/{self.sim_code}") + +if __name__ == '__main__': + # Need to redefine these globals here if running the test file directly + # or ensure they are available in the scope where ReverieServer is imported +# This is a bit of a hack for the direct execution environment. +# Ideally, tests are run by a test runner that handles paths correctly. +import reverie.backend_server.reverie as reverie_module_for_globals + +# Global mocks for fs_storage etc. for when ReverieServer module is loaded. +reverie_module_for_globals.fs_storage = global_fs_storage +reverie_module_for_globals.fs_temp_storage = global_fs_temp_storage +reverie_module_for_globals.maze_assets_loc = global_maze_assets_loc + +# If patch was started, ensure it's stopped +def tearDownModule(): + if patched_openai_api_key and 'gpt_structure_patcher' in globals() and gpt_structure_patcher.is_started: + # print("DEBUG: Stopping patch for openai_api_key.") + gpt_structure_patcher.stop() + + unittest.main(argv=['first-arg-is-ignored'], exit=False) From 2c9cc11126c08d9a904e954bb3527f76326ea392 Mon Sep 17 00:00:00 2001 From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com> Date: Mon, 26 May 2025 12:47:08 +0000 Subject: [PATCH 2/2] Refactor backend, add tests, and perform partial Chinese translation. This commit includes several improvements and a partial translation of the Generative Agents project into Chinese. Key changes: - Improved error handling and implemented logging in `reverie/backend_server/reverie.py`. - Refactored the `open_server` method in `reverie.py` for better readability and maintainability. - Added unit tests for `reverie.py`. - Investigated frontend glitches: Changed Phaser rendering mode to CANVAS in home template as a diagnostic step. - Translated user-facing text in `reverie.py` to Chinese. Full-Scale Translation Effort (Partial Completion): - Translated Python comments and relevant string literals in: - `reverie/backend_server/persona/cognitive_modules/` - `reverie/backend_server/persona/memory_structures/` - `reverie/backend_server/persona/persona.py` - Remaining Python files in `reverie/backend_server/` (global_methods.py, maze.py, path_finder.py, test.py) - Python files in `environment/frontend_server/frontend_server/` (and its settings/ subdirectory) - Python files in `environment/frontend_server/translator/` - Note: For many of these Python files, I re-confirmed previous translations against more detailed guidelines. Blocked/Skipped Tasks: - Dependency Management: My attempt to migrate to Poetry was blocked by an error (`Could not parse version constraint: ==*`). I skipped this as per your instruction. - LLM Prompt Translation (`.txt` files): Translation of `.txt` files in `reverie/backend_server/persona/prompt_template/` was skipped due to a persistent error that prevented me from saving changes to these files. Further work would involve translating HTML templates, JavaScript, and the README, and resolving the issue with `.txt` file modifications. --- .../frontend_server/settings/__init__.py | 4 +- .../frontend_server/settings/base.py | 20 +- .../frontend_server/settings/local.py | 20 +- .../frontend_server/frontend_server/urls.py | 24 +-- .../frontend_server/frontend_server/wsgi.py | 6 +- .../frontend_server/translator/tests.py | 2 +- .../frontend_server/translator/views.py | 75 ++++---- reverie/backend_server/path_finder.py | 49 +++-- .../prompt_template/defunct_run_gpt_prompt.py | 174 +++++++++--------- .../persona/prompt_template/gpt_structure.py | 124 ++++++------- .../persona/prompt_template/print_prompt.py | 20 +- reverie/backend_server/test.py | 51 +++-- 12 files changed, 273 insertions(+), 296 deletions(-) diff --git a/environment/frontend_server/frontend_server/settings/__init__.py b/environment/frontend_server/frontend_server/settings/__init__.py index c0f530ad8f..09edeb3617 100644 --- a/environment/frontend_server/frontend_server/settings/__init__.py +++ b/environment/frontend_server/frontend_server/settings/__init__.py @@ -1,4 +1,4 @@ -###FOR PUSHING STATIC TO AWS +###用于将静态文件推送到 AWS @@ -15,7 +15,7 @@ -###FOR GENERAL USES +###通用配置 diff --git a/environment/frontend_server/frontend_server/settings/base.py b/environment/frontend_server/frontend_server/settings/base.py index ed315458ef..3af226ede2 100644 --- a/environment/frontend_server/frontend_server/settings/base.py +++ b/environment/frontend_server/frontend_server/settings/base.py @@ -12,23 +12,23 @@ import os -# Build paths inside the project like this: os.path.join(BASE_DIR, ...) +# 项目内部的路径构建方式如下: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -# Quick-start development settings - unsuitable for production -# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ +# 快速启动开发设置 - 不适用于生产环境 +# 参见 https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ -# SECURITY WARNING: keep the secret key used in production secret! +# 安全警告: 生产环境中使用的密钥必须保密! SECRET_KEY = 'c7l%1%b=2sh$o9zqvd4i*h8*__^@-5sm-y)m(1ib2t92)43@62' -# SECURITY WARNING: don't run with debug turned on in production! +# 安全警告: 不要在生产环境中启用调试模式! DEBUG = True ALLOWED_HOSTS = [] -# Application definition +# 应用定义 INSTALLED_APPS = [ 'django.contrib.admin', @@ -74,7 +74,7 @@ WSGI_APPLICATION = 'frontend_server.wsgi.application' -# Database +# 数据库 # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { @@ -85,7 +85,7 @@ } -# Password validation +# 密码验证 # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ @@ -104,7 +104,7 @@ ] -# Internationalization +# 国际化 # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' @@ -118,7 +118,7 @@ USE_TZ = True -# Static files (CSS, JavaScript, Images) +# 静态文件 (CSS, JavaScript, 图片) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' diff --git a/environment/frontend_server/frontend_server/settings/local.py b/environment/frontend_server/frontend_server/settings/local.py index d574be9f8f..a274ca798e 100644 --- a/environment/frontend_server/frontend_server/settings/local.py +++ b/environment/frontend_server/frontend_server/settings/local.py @@ -12,23 +12,23 @@ import os -# Build paths inside the project like this: os.path.join(BASE_DIR, ...) +# 项目内部的路径构建方式如下: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -# Quick-start development settings - unsuitable for production -# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ +# 快速启动开发设置 - 不适用于生产环境 +# 参见 https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ -# SECURITY WARNING: keep the secret key used in production secret! +# 安全警告: 生产环境中使用的密钥必须保密! SECRET_KEY = 'c7l%1%b=2sh$o9zqvd4i*h8*__^@-5sm-y)m(1ib2t92)43@62' -# SECURITY WARNING: don't run with debug turned on in production! +# 安全警告: 不要在生产环境中启用调试模式! DEBUG = True ALLOWED_HOSTS = [] -# Application definition +# 应用定义 INSTALLED_APPS = [ 'django.contrib.admin', @@ -74,7 +74,7 @@ WSGI_APPLICATION = 'frontend_server.wsgi.application' -# Database +# 数据库 # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { @@ -85,7 +85,7 @@ } -# Password validation +# 密码验证 # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ @@ -104,7 +104,7 @@ ] -# Internationalization +# 国际化 # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' @@ -118,7 +118,7 @@ USE_TZ = True -# Static files (CSS, JavaScript, Images) +# 静态文件 (CSS, JavaScript, 图片) # https://docs.djangoproject.com/en/2.2/howto/static-files/ diff --git a/environment/frontend_server/frontend_server/urls.py b/environment/frontend_server/frontend_server/urls.py index 54d76d6de3..c81c003eb1 100644 --- a/environment/frontend_server/frontend_server/urls.py +++ b/environment/frontend_server/frontend_server/urls.py @@ -1,17 +1,17 @@ -"""frontend_server URL Configuration +"""frontend_server URL 配置 -The `urlpatterns` list routes URLs to views. For more information please see: +`urlpatterns` 列表将 URL 路由到视图。更多信息请参见: https://docs.djangoproject.com/en/2.2/topics/http/urls/ -Examples: -Function views - 1. Add an import: from my_app import views - 2. Add a URL to urlpatterns: path('', views.home, name='home') -Class-based views - 1. Add an import: from other_app.views import Home - 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') -Including another URLconf - 1. Import the include() function: from django.urls import include, path - 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) +示例: +函数视图 + 1. 添加导入: from my_app import views + 2. 向 urlpatterns 添加一个 URL: path('', views.home, name='home') +基于类的视图 + 1. 添加导入: from other_app.views import Home + 2. 向 urlpatterns 添加一个 URL: path('', Home.as_view(), name='home') +包含另一个 URLconf + 1. 导入 include() 函数: from django.urls import include, path + 2. 向 urlpatterns 添加一个 URL: path('blog/', include('blog.urls')) """ from django.conf.urls import include, url from django.urls import path diff --git a/environment/frontend_server/frontend_server/wsgi.py b/environment/frontend_server/frontend_server/wsgi.py index af28ad8d3d..058400d0cd 100644 --- a/environment/frontend_server/frontend_server/wsgi.py +++ b/environment/frontend_server/frontend_server/wsgi.py @@ -1,9 +1,9 @@ """ -WSGI config for frontend_server project. +frontend_server 项目的 WSGI 配置。 -It exposes the WSGI callable as a module-level variable named ``application``. +它将 WSGI 可调用对象公开为名为 ``application`` 的模块级变量。 -For more information on this file, see +有关此文件的更多信息,请参阅 https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ diff --git a/environment/frontend_server/translator/tests.py b/environment/frontend_server/translator/tests.py index 7ce503c2dd..1e3ce586fd 100644 --- a/environment/frontend_server/translator/tests.py +++ b/environment/frontend_server/translator/tests.py @@ -1,3 +1,3 @@ from django.test import TestCase -# Create your tests here. +# 在此处创建您的测试。 diff --git a/environment/frontend_server/translator/views.py b/environment/frontend_server/translator/views.py index 20b8a29bba..1a9be40393 100644 --- a/environment/frontend_server/translator/views.py +++ b/environment/frontend_server/translator/views.py @@ -1,6 +1,6 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) -File: views.py +作者: Joon Sung Park (joonspk@stanford.edu) +文件: views.py """ import os import string @@ -32,7 +32,7 @@ def demo(request, sim_code, step, play_speed="2"): if play_speed not in play_speed_opt: play_speed = 2 else: play_speed = play_speed_opt[play_speed] - # Loading the basic meta information about the simulation. + # 加载关于模拟的基本元信息。 meta = dict() with open (meta_file) as json_file: meta = json.load(json_file) @@ -44,12 +44,12 @@ def demo(request, sim_code, step, play_speed="2"): start_datetime += datetime.timedelta(seconds=sec_per_step) start_datetime = start_datetime.strftime("%Y-%m-%dT%H:%M:%S") - # Loading the movement file + # 加载移动文件 raw_all_movement = dict() with open(move_file) as json_file: raw_all_movement = json.load(json_file) - # Loading all names of the personas + # 加载所有角色的名称 persona_names = dict() persona_names = [] persona_names_set = set() @@ -59,15 +59,13 @@ def demo(request, sim_code, step, play_speed="2"): "initial": p[0] + p.split(" ")[-1][0]}] persona_names_set.add(p) - # is the main movement variable that we are passing to the - # frontend. Whereas we use ajax scheme to communicate steps to the frontend - # during the simulation stage, for this demo, we send all movement - # information in one step. + # 是我们传递给前端的主要移动变量。 + # 在模拟阶段,我们使用 ajax 方案与前端通信步骤,但在此演示中, + # 我们一步发送所有移动信息。 all_movement = dict() - # Preparing the initial step. - # sets the locations and descriptions of all agents at the - # beginning of the demo determined by . + # 准备初始步骤。 + # 设置由 决定的演示开始时所有代理的位置和描述。 init_prep = dict() for int_key in range(step+1): key = str(int_key) @@ -80,7 +78,7 @@ def demo(request, sim_code, step, play_speed="2"): persona_init_pos[p.replace(" ","_")] = init_prep[p]["movement"] all_movement[step] = init_prep - # Finish loading + # 完成加载 for int_key in range(step+1, len(raw_all_movement.keys())): all_movement[int_key] = raw_all_movement[str(int_key)] @@ -240,15 +238,14 @@ def path_tester(request): def process_environment(request): """ - - This sends the frontend visual world information to the backend server. - It does this by writing the current environment representation to - "storage/environment.json" file. - - ARGS: - request: Django request - RETURNS: - HttpResponse: string confirmation message. + <前端到后端> + 此函数将前端可视化世界信息发送到后端服务器。 + 它通过将当前环境表示写入 "storage/environment.json" 文件来完成此操作。 + + 参数: + request: Django 请求 + 返回: + HttpResponse: 字符串确认消息。 """ # f_curr_sim_code = "temp_storage/curr_sim_code.json" # with open(f_curr_sim_code) as json_file: @@ -262,21 +259,19 @@ def process_environment(request): with open(f"storage/{sim_code}/environment/{step}.json", "w") as outfile: outfile.write(json.dumps(environment, indent=2)) - return HttpResponse("received") + return HttpResponse("已接收") def update_environment(request): """ - - This sends the backend computation of the persona behavior to the frontend - visual server. - It does this by reading the new movement information from - "storage/movement.json" file. - - ARGS: - request: Django request - RETURNS: - HttpResponse + <后端到前端> + 此函数将角色行为的后端计算结果发送到前端可视化服务器。 + 它通过从 "storage/movement.json" 文件读取新的移动信息来完成此操作。 + + 参数: + request: Django 请求 + 返回: + JsonResponse """ # f_curr_sim_code = "temp_storage/curr_sim_code.json" # with open(f_curr_sim_code) as json_file: @@ -297,13 +292,13 @@ def update_environment(request): def path_tester_update(request): """ - Processing the path and saving it to path_tester_env.json temp storage for - conducting the path tester. + 处理路径并将其保存到 path_tester_env.json 临时存储中, + 以便进行路径测试。 - ARGS: - request: Django request - RETURNS: - HttpResponse: string confirmation message. + 参数: + request: Django 请求 + 返回: + HttpResponse: 字符串确认消息。 """ data = json.loads(request.body) camera = data["camera"] @@ -311,7 +306,7 @@ def path_tester_update(request): with open(f"temp_storage/path_tester_env.json", "w") as outfile: outfile.write(json.dumps(camera, indent=2)) - return HttpResponse("received") + return HttpResponse("已接收") diff --git a/reverie/backend_server/path_finder.py b/reverie/backend_server/path_finder.py index 777053a298..493d0b849e 100644 --- a/reverie/backend_server/path_finder.py +++ b/reverie/backend_server/path_finder.py @@ -1,9 +1,9 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: path_finder.py -Description: Implements various path finding functions for generative agents. -Some of the functions are defunct. +文件: path_finder.py +描述: 实现用于生成式代理的各种路径查找函数。 +其中一些函数已失效。 """ import numpy as np @@ -37,26 +37,25 @@ def is_valid_position(maze, pos_r, pos_c): def solve_maze(maze, start, verbose=False): path = [] - # We use a Python list as a stack - then we have push operations as - # append, and pop as pop. + # 我们使用 Python 列表作为堆栈 - 然后我们将 append 作为入栈操作,pop 作为出栈操作。 stack = [] - # Add the entry point (as a tuple) + # 添加入口点 (作为元组) stack.append(start) - # Go through the stack as long as there are elements + # 只要堆栈中还有元素,就继续遍历 while len(stack) > 0: pos_r, pos_c = stack.pop() if verbose: - print("Current position", pos_r, pos_c) - if maze[pos_r][pos_c] == 'E': + print("Current position", pos_r, pos_c) # DEBUG: "Current position" + if maze[pos_r][pos_c] == 'E': # 'E' is an internal marker path += [(pos_r, pos_c)] return path - if maze[pos_r][pos_c] == 'X': - # Already visited + if maze[pos_r][pos_c] == 'X': # 'X' is an internal marker + # 已访问 continue - # Mark position as visited - maze[pos_r][pos_c] = 'X' + # 标记位置为已访问 + maze[pos_r][pos_c] = 'X' # 'X' is an internal marker path += [(pos_r, pos_c)] - # Check for all possible positions and add if possible + # 检查所有可能的位置,如果可能则添加 if is_valid_position(maze, pos_r - 1, pos_c): stack.append((pos_r - 1, pos_c)) if is_valid_position(maze, pos_r + 1, pos_c): @@ -66,15 +65,15 @@ def solve_maze(maze, start, verbose=False): if is_valid_position(maze, pos_r, pos_c + 1): stack.append((pos_r, pos_c + 1)) - # To follow the maze + # 跟踪迷宫(用于调试) if verbose: - print('Stack:' , stack) + print('Stack:' , stack) # DEBUG: "Stack:" print_maze(maze) - # We didn't find a path, hence we do not need to return the path + # 我们没有找到路径,因此不需要返回路径 return False - # clean maze + # 清理迷宫(将碰撞字符替换为'#') new_maze = [] for row in maze: new_row = [] @@ -162,10 +161,10 @@ def make_step(m, k): def path_finder(maze, start, end, collision_block_char, verbose=False): - # EMERGENCY PATCH + # 紧急补丁 start = (start[1], start[0]) end = (end[1], end[0]) - # END EMERGENCY PATCH + # 紧急补丁结束 path = path_finder_v2(maze, start, end, collision_block_char, verbose) @@ -196,8 +195,8 @@ def closest_coordinate(curr_coordinate, target_coordinates): def path_finder_2(maze, start, end, collision_block_char, verbose=False): - # start => persona_a - # end => persona_b + # start => 角色A + # end => 角色B start = list(start) end = list(end) @@ -221,8 +220,8 @@ def path_finder_2(maze, start, end, collision_block_char, verbose=False): def path_finder_3(maze, start, end, collision_block_char, verbose=False): - # start => persona_a - # end => persona_b + # start => 角色A + # end => 角色B curr_path = path_finder(maze, start, end, collision_block_char, verbose=False) if len(curr_path) <= 2: diff --git a/reverie/backend_server/persona/prompt_template/defunct_run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/defunct_run_gpt_prompt.py index d47ec3c9ce..6ec8ec5fad 100644 --- a/reverie/backend_server/persona/prompt_template/defunct_run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/defunct_run_gpt_prompt.py @@ -1,11 +1,10 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: defunct_run_gpt_prompt.py -Description: Defines all run gpt prompt functions. These functions directly -interface with the safe_generate_response function. +文件: defunct_run_gpt_prompt.py +描述: 定义所有运行 gpt 提示的函数。这些函数直接与 safe_generate_response 函数交互。 -Note (March 10, 2023) -- Defunct +注意 (2023年3月10日) -- 已弃用 """ import re import datetime @@ -19,14 +18,13 @@ def get_random_alphanumeric(i=6, j=6): """ - Returns a random alpha numeric strength that has the length of somewhere - between i and j. - - INPUT: - i: min_range for the length - j: max_range for the length - OUTPUT: - an alpha numeric str with the length of somewhere between i and j. + 返回一个长度在 i 和 j 之间的随机字母数字字符串。 + + 输入: + i: 长度的最小值范围 + j: 长度的最大值范围 + 输出: + 一个长度在 i 和 j 之间的字母数字字符串。 """ k = random.randint(i, j) x = ''.join(random.choices(string.ascii_letters + string.digits, k=k)) @@ -34,18 +32,17 @@ def get_random_alphanumeric(i=6, j=6): ############################################################################## -# CHAPTER 1: Run GPT Prompt +# 第一章: 运行 GPT 提示 ############################################################################## def run_gpt_prompt_wake_up_hour(persona, test_input=None, verbose=False): """ - Given the persona, returns an integer that indicates the hour when the - persona wakes up. + 给定角色,返回一个整数,表示角色醒来的小时。 - INPUT: - persona: The Persona class instance - OUTPUT: - integer for the wake up hour. + 输入: + persona: Persona 类实例 + 输出: + 表示醒来小时的整数。 """ def create_prompt_input(persona, test_input=None): if test_input: return test_input @@ -55,6 +52,7 @@ def create_prompt_input(persona, test_input=None): return prompt_input def __func_clean_up(gpt_response, prompt=""): + # Assumes format like "8 am" or "8am" cr = int(gpt_response.strip().lower().split("am")[0]) return cr @@ -64,7 +62,7 @@ def __func_validate(gpt_response, prompt=""): return True def get_fail_safe(): - fs = 8 + fs = 8 # Default wake up hour return fs gpt_param = {"engine": "text-davinci-002", "max_tokens": 5, @@ -90,16 +88,16 @@ def run_gpt_prompt_daily_plan(persona, test_input=None, verbose=False): """ - Basically the long term planning that spans a day. Returns a list of actions - that the persona will take today. Usually comes in the following form: + 基本上是跨越一天的长期规划。返回角色今天将要执行的动作列表。 + 通常格式如下: 'wake up and complete the morning routine at 6:00 am', - 'eat breakfast at 7:00 am',.. - Note that the actions come without a period. + 'eat breakfast at 7:00 am',... + 注意动作描述末尾不带句号。 - INPUT: - persona: The Persona class instance - OUTPUT: - a list of daily actions in broad strokes. + 输入: + persona: Persona 类实例 + 输出: + 一个大致的每日行动列表。 """ def create_prompt_input(persona, wake_up_hour, test_input=None): if test_input: return test_input @@ -128,13 +126,13 @@ def __func_validate(gpt_response, prompt=""): return True def get_fail_safe(): - fs = ['wake up and complete the morning routine at 6:00 am', - 'eat breakfast at 7:00 am', - 'read a book from 8:00 am to 12:00 pm', - 'have lunch at 12:00 pm', - 'take a nap from 1:00 pm to 4:00 pm', - 'relax and watch TV from 7:00 pm to 8:00 pm', - 'go to bed at 11:00 pm'] + fs = ['wake up and complete the morning routine at 6:00 am', # Example, not translated + 'eat breakfast at 7:00 am', # Example, not translated + 'read a book from 8:00 am to 12:00 pm', # Example, not translated + 'have lunch at 12:00 pm', # Example, not translated + 'take a nap from 1:00 pm to 4:00 pm', # Example, not translated + 'relax and watch TV from 7:00 pm to 8:00 pm', # Example, not translated + 'go to bed at 11:00 pm'] # Example, not translated return fs gpt_param = {"engine": "text-davinci-003", "max_tokens": 500, @@ -174,11 +172,10 @@ def create_prompt_input(persona, schedule_format = "" for i in hour_str: schedule_format += f"[{persona.scratch.get_str_curr_date_str()} -- {i}]" - schedule_format += f" Activity: [Fill in]\n" + schedule_format += f" 活动: [请填写]\n" # "Activity: [Fill in]" schedule_format = schedule_format[:-1] - intermission_str = f"Here the originally intended hourly breakdown of" - intermission_str += f" {persona.scratch.get_str_firstname()}'s schedule today: " + intermission_str = f"这是 {persona.scratch.get_str_firstname()} 今天日程最初计划的每小时细分:" for count, i in enumerate(persona.scratch.daily_req): intermission_str += f"{str(count+1)}) {i}, " intermission_str = intermission_str[:-2] @@ -217,7 +214,7 @@ def create_prompt_input(persona, def __func_clean_up(gpt_response, prompt=""): cr = gpt_response.strip() - if cr[-1] == ".": + if cr[-1] == ".": # Checking for period, not a translatable character in this context cr = cr[:-1] return cr @@ -227,7 +224,7 @@ def __func_validate(gpt_response, prompt=""): return True def get_fail_safe(): - fs = "asleep" + fs = "asleep" # Default state, not translated return fs gpt_param = {"engine": "text-davinci-003", "max_tokens": 50, @@ -278,21 +275,21 @@ def create_prompt_input(persona, task, duration, test_input=None): # if curr_f_org_index > 0: # all_indices += [curr_f_org_index-1] all_indices += [curr_f_org_index] - if curr_f_org_index+1 <= len(persona.scratch.f_daily_schedule_hourly_org): + if curr_f_org_index+1 <= len(persona.scratch.f_daily_schedule_hourly_org): # Fixed: should be <= to include the last element if it's the end all_indices += [curr_f_org_index+1] - if curr_f_org_index+2 <= len(persona.scratch.f_daily_schedule_hourly_org): + if curr_f_org_index+2 <= len(persona.scratch.f_daily_schedule_hourly_org): # Fixed: should be <= all_indices += [curr_f_org_index+2] curr_time_range = "" - print ("DEBUG") - print (persona.scratch.f_daily_schedule_hourly_org) - print (all_indices) + print ("DEBUG") # DEBUG + print (persona.scratch.f_daily_schedule_hourly_org) # DEBUG + print (all_indices) # DEBUG - summ_str = f'Today is {persona.scratch.curr_time.strftime("%B %d, %Y")}. ' - summ_str += f'From ' + summ_str = f'今天是 {persona.scratch.curr_time.strftime("%B %d, %Y")}。 ' # "Today is..." + summ_str += f'从 ' # "From " for index in all_indices: - print ("index", index) + print ("index", index) # DEBUG if index < len(persona.scratch.f_daily_schedule_hourly_org): start_min = 0 for i in range(index): @@ -304,8 +301,8 @@ def create_prompt_input(persona, task, duration, test_input=None): + datetime.timedelta(minutes=end_min)) start_time_str = start_time.strftime("%H:%M%p") end_time_str = end_time.strftime("%H:%M%p") - summ_str += f"{start_time_str} ~ {end_time_str}, {persona.name} is planning on {persona.scratch.f_daily_schedule_hourly_org[index][0]}, " - if curr_f_org_index+1 == index: + summ_str += f"{start_time_str} ~ {end_time_str}, {persona.name} 计划进行 {persona.scratch.f_daily_schedule_hourly_org[index][0]}, " # "...is planning on..." + if curr_f_org_index+1 == index: # This logic might need review depending on intent with all_indices bounds. curr_time_range = f'{start_time_str} ~ {end_time_str}' summ_str = summ_str[:-2] + "." @@ -322,11 +319,11 @@ def create_prompt_input(persona, task, duration, test_input=None): return prompt_input def __func_clean_up(gpt_response, prompt=""): - print ("TOODOOOOOO") - print (gpt_response) - print ("-==- -==- -==- ") + print ("TOODOOOOOO") # DEBUG + print (gpt_response) # DEBUG + print ("-==- -==- -==- ") # DEBUG - # TODO SOMETHING HERE sometimes fails... See screenshot + # 待办:这里有时会失败……请看截图 temp = [i.strip() for i in gpt_response.split("\n")] _cr = [] cr = [] @@ -346,8 +343,7 @@ def __func_clean_up(gpt_response, prompt=""): total_expected_min = int(prompt.split("(total duration in minutes")[-1] .split("):")[0].strip()) - # TODO -- now, you need to make sure that this is the same as the sum of - # the current action sequence. + # 待办 -- 现在,你需要确保这与当前动作序列的总和相同。 curr_min_slot = [["dummy", -1],] # (task_name, task_index) for count, i in enumerate(cr): i_task = i[0] @@ -379,16 +375,16 @@ def __func_clean_up(gpt_response, prompt=""): return cr def __func_validate(gpt_response, prompt=""): - # TODO -- this sometimes generates error + # 待办 -- 这里有时会产生错误 try: __func_clean_up(gpt_response) except: pass # return False - return gpt_response + return gpt_response # This should ideally return a boolean after validation def get_fail_safe(): - fs = ["asleep"] + fs = ["asleep"] # Default state, not translated return fs gpt_param = {"engine": "text-davinci-003", "max_tokens": 1000, @@ -399,13 +395,13 @@ def get_fail_safe(): prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - print ("?????") - print (prompt) + print ("?????") # DEBUG + print (prompt) # DEBUG output = safe_generate_response(prompt, gpt_param, 5, get_fail_safe(), __func_validate, __func_clean_up) - # TODO THERE WAS A BUG HERE... - # This is for preventing overflows... + # 待办:这里曾有一个BUG…… + # 这是为了防止溢出…… """ File "/Users/joonsungpark/Desktop/Stanford/Projects/ generative-personas/src_exploration/reverie_simulation/ @@ -515,7 +511,7 @@ def __func_validate(gpt_response, prompt=""): return True def get_fail_safe(): - fs = ("kitchen") + fs = ("kitchen") # Location, not translated return fs gpt_param = {"engine": "text-davinci-002", "max_tokens": 15, @@ -596,7 +592,7 @@ def __func_validate(gpt_response, prompt=""): return True def get_fail_safe(): - fs = ("kitchen") + fs = ("kitchen") # Location, not translated return fs gpt_param = {"engine": "text-davinci-003", "max_tokens": 15, @@ -651,7 +647,7 @@ def __func_clean_up(gpt_response, prompt=""): return cleaned_response def get_fail_safe(): - fs = ("bed") + fs = ("bed") # Object, not translated return fs gpt_param = {"engine": "text-davinci-003", "max_tokens": 15, @@ -702,7 +698,7 @@ def __func_validate(gpt_response, prompt=""): return True def get_fail_safe(): - fs = "😋" + fs = "😋" # Emoji, not translated return fs gpt_param = {"engine": "text-davinci-003", "max_tokens": 15, @@ -757,7 +753,7 @@ def __func_validate(gpt_response, prompt=""): return True def get_fail_safe(persona): - fs = (persona.name, "is", "idle") + fs = (persona.name, "is", "idle") # "is", "idle" are keywords return fs gpt_param = {"engine": "text-davinci-003", "max_tokens": 30, @@ -811,7 +807,7 @@ def __func_validate(gpt_response, prompt=""): return True def get_fail_safe(act_game_object): - fs = f"{act_game_object} is idle" + fs = f"{act_game_object} is idle" # "is idle" is a keyword phrase return fs gpt_param = {"engine": "text-davinci-003", "max_tokens": 30, @@ -859,7 +855,7 @@ def __func_validate(gpt_response, prompt=""): return True def get_fail_safe(act_game_object): - fs = (act_game_object, "is", "idle") + fs = (act_game_object, "is", "idle") # "is", "idle" are keywords return fs gpt_param = {"engine": "text-davinci-003", "max_tokens": 30, @@ -977,13 +973,15 @@ def __func_validate(gpt_response, prompt=""): return True def get_fail_safe(main_act_dur, truncated_act_dur): + # This function constructs a fallback schedule. The task descriptions + # themselves are data and should not be translated here. dur_sum = 0 for act, dur in main_act_dur: dur_sum += dur ret = truncated_act_dur[:] ret += main_act_dur[len(ret)-1:] - # If there are access, we need to trim... + # 如果有超出,我们需要修剪... (If there are access, we need to trim...) ret_dur_sum = 0 count = 0 over = None @@ -1257,7 +1255,7 @@ def create_prompt_input(init_persona, target_persona, curr_loc, for i in init_persona.a_mem.seq_chat: if i.object == target_persona.scratch.name: v1 = int((init_persona.scratch.curr_time - i.created).total_seconds()/60) - prev_convo_insert += f'{str(v1)} minutes ago, they had the following conversation.\n' + prev_convo_insert += f'{str(v1)} 分钟前,他们进行了以下对话。\n' # "... minutes ago, they had the following conversation." for row in i.filling: prev_convo_insert += f'{row[0]}: "{row[1]}"\n' break @@ -1284,15 +1282,15 @@ def create_prompt_input(init_persona, target_persona, curr_loc, init_persona_curr_desc = "" if init_persona.scratch.planned_path: - init_persona_curr_desc = f"{init_persona.name} is on the way to {init_persona.scratch.act_description}" + init_persona_curr_desc = f"{init_persona.name} 正在前往 {init_persona.scratch.act_description}" # "...is on the way to..." else: - init_persona_curr_desc = f"{init_persona.name} is {init_persona.scratch.act_description}" + init_persona_curr_desc = f"{init_persona.name} 正在 {init_persona.scratch.act_description}" # "...is..." target_persona_curr_desc = "" if target_persona.scratch.planned_path: - target_persona_curr_desc = f"{target_persona.name} is on the way to {target_persona.scratch.act_description}" + target_persona_curr_desc = f"{target_persona.name} 正在前往 {target_persona.scratch.act_description}" # "...is on the way to..." else: - target_persona_curr_desc = f"{target_persona.name} is {target_persona.scratch.act_description}" + target_persona_curr_desc = f"{target_persona.name} 正在 {target_persona.scratch.act_description}" # "...is..." curr_loc = curr_loc["arena"] @@ -1324,11 +1322,11 @@ def create_prompt_input(init_persona, target_persona, curr_loc, return prompt_input def __func_clean_up(gpt_response, prompt=""): - # print ("???") - # print (gpt_response) + # print ("???") # DEBUG + # print (gpt_response) # DEBUG - - gpt_response = (prompt + gpt_response).split("What would they talk about now?")[-1].strip() + # "What would they talk about now?" is part of prompt structure. + gpt_response = (prompt + gpt_response).split("他们现在会谈论什么?")[-1].strip() content = re.findall('"([^"]*)"', gpt_response) speaker_order = [] @@ -1351,8 +1349,8 @@ def __func_validate(gpt_response, prompt=""): return False def get_fail_safe(init_persona, target_persona): - convo = [[init_persona.name, "Hi!"], - [target_persona.name, "Hi!"]] + convo = [[init_persona.name, "你好!"], + [target_persona.name, "你好!"]] return convo @@ -1393,7 +1391,7 @@ def create_prompt_input(conversation, test_input=None): return prompt_input def __func_clean_up(gpt_response, prompt=""): - ret = "conversing about " + gpt_response.strip() + ret = "谈论关于 " + gpt_response.strip() # "conversing about " return ret def __func_validate(gpt_response, prompt=""): @@ -1404,7 +1402,7 @@ def __func_validate(gpt_response, prompt=""): return False def get_fail_safe(): - return "conversing with a housemate about morning greetings" + return "与室友谈论晨间问候" # "conversing with a housemate about morning greetings" gpt_param = {"engine": "text-davinci-003", "max_tokens": 50, "temperature": 0, "top_p": 1, "stream": False, @@ -1434,9 +1432,9 @@ def create_prompt_input(description, test_input=None): return prompt_input def __func_clean_up(gpt_response, prompt=""): - print ("???") - print (gpt_response) - gpt_response = gpt_response.strip().split("Emotive keywords:") + print ("???") # DEBUG + print (gpt_response) # DEBUG + gpt_response = gpt_response.strip().split("情感关键词:") # "Emotive keywords:" factual = [i.strip() for i in gpt_response[0].split(",")] emotive = [i.strip() for i in gpt_response[1].split(",")] all_keywords = factual + emotive diff --git a/reverie/backend_server/persona/prompt_template/gpt_structure.py b/reverie/backend_server/persona/prompt_template/gpt_structure.py index f9c4718949..6904d1b382 100644 --- a/reverie/backend_server/persona/prompt_template/gpt_structure.py +++ b/reverie/backend_server/persona/prompt_template/gpt_structure.py @@ -1,8 +1,8 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: gpt_structure.py -Description: Wrapper functions for calling OpenAI APIs. +文件: gpt_structure.py +描述: 调用 OpenAI API 的封装函数。 """ import json import random @@ -27,20 +27,17 @@ def ChatGPT_single_request(prompt): # ============================================================================ -# #####################[SECTION 1: CHATGPT-3 STRUCTURE] ###################### +# #####################[第一节: CHATGPT-3 结构] ###################### # ============================================================================ def GPT4_request(prompt): """ - Given a prompt and a dictionary of GPT parameters, make a request to OpenAI - server and returns the response. - ARGS: - prompt: a str prompt - gpt_parameter: a python dictionary with the keys indicating the names of - the parameter and the values indicating the parameter - values. - RETURNS: - a str of GPT-3's response. + 给定一个提示和 GPT 参数字典,向 OpenAI 服务器发出请求并返回响应。 + 参数: + prompt: 一个字符串提示 + gpt_parameter: 一个 Python 字典,其键指示参数名称,值指示参数值。 + 返回: + GPT-3 响应的字符串。 (注:应为 GPT-4) """ temp_sleep() @@ -52,23 +49,20 @@ def GPT4_request(prompt): return completion["choices"][0]["message"]["content"] except: - print ("ChatGPT ERROR") - return "ChatGPT ERROR" + print ("ChatGPT ERROR") # Kept in English as an error identifier + return "ChatGPT ERROR" # Kept in English as an error identifier def ChatGPT_request(prompt): """ - Given a prompt and a dictionary of GPT parameters, make a request to OpenAI - server and returns the response. - ARGS: - prompt: a str prompt - gpt_parameter: a python dictionary with the keys indicating the names of - the parameter and the values indicating the parameter - values. - RETURNS: - a str of GPT-3's response. + 给定一个提示和 GPT 参数字典,向 OpenAI 服务器发出请求并返回响应。 + 参数: + prompt: 一个字符串提示 + gpt_parameter: 一个 Python 字典,其键指示参数名称,值指示参数值。 + 返回: + GPT-3 响应的字符串。 """ - # temp_sleep() + # 临时休眠() try: completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", @@ -89,13 +83,13 @@ def GPT4_safe_generate_response(prompt, func_validate=None, func_clean_up=None, verbose=False): - prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n' - prompt += f"Output the response to the prompt above in json. {special_instruction}\n" - prompt += "Example output json:\n" - prompt += '{"output": "' + str(example_output) + '"}' + prompt = 'GPT-3 提示:\n"""\n' + prompt + '\n"""\n' # "GPT-3 Prompt:" + prompt += f"请将对上述提示的回应以 json 格式输出。{special_instruction}\n" # "Output the response to the prompt above in json." + prompt += "输出 json 示例:\n" # "Example output json:" + prompt += '{"output": "' + str(example_output) + '"}' # "output" is a key, not translated. if verbose: - print ("CHAT GPT PROMPT") + print ("CHAT GPT PROMPT") # DEBUG: "CHAT GPT PROMPT" print (prompt) for i in range(repeat): @@ -110,9 +104,9 @@ def GPT4_safe_generate_response(prompt, return func_clean_up(curr_gpt_response, prompt=prompt) if verbose: - print ("---- repeat count: \n", i, curr_gpt_response) + print ("---- 重试次数: \n", i, curr_gpt_response) # "---- repeat count: \n" print (curr_gpt_response) - print ("~~~~") + print ("~~~~") # Separator, not translated except: pass @@ -128,14 +122,14 @@ def ChatGPT_safe_generate_response(prompt, func_validate=None, func_clean_up=None, verbose=False): - # prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n' + # prompt = 'GPT-3 提示:\n"""\n' + prompt + '\n"""\n' # "GPT-3 Prompt:" prompt = '"""\n' + prompt + '\n"""\n' - prompt += f"Output the response to the prompt above in json. {special_instruction}\n" - prompt += "Example output json:\n" - prompt += '{"output": "' + str(example_output) + '"}' + prompt += f"请将对上述提示的回应以 json 格式输出。{special_instruction}\n" # "Output the response to the prompt above in json." + prompt += "输出 json 示例:\n" # "Example output json:" + prompt += '{"output": "' + str(example_output) + '"}' # "output" is a key, not translated. if verbose: - print ("CHAT GPT PROMPT") + print ("CHAT GPT PROMPT") # DEBUG: "CHAT GPT PROMPT" print (prompt) for i in range(repeat): @@ -146,17 +140,17 @@ def ChatGPT_safe_generate_response(prompt, curr_gpt_response = curr_gpt_response[:end_index] curr_gpt_response = json.loads(curr_gpt_response)["output"] - # print ("---ashdfaf") - # print (curr_gpt_response) - # print ("000asdfhia") + # print ("---ashdfaf") # DEBUG + # print (curr_gpt_response) # DEBUG + # print ("000asdfhia") # DEBUG if func_validate(curr_gpt_response, prompt=prompt): return func_clean_up(curr_gpt_response, prompt=prompt) if verbose: - print ("---- repeat count: \n", i, curr_gpt_response) + print ("---- 重试次数: \n", i, curr_gpt_response) # "---- repeat count: \n" print (curr_gpt_response) - print ("~~~~") + print ("~~~~") # Separator, not translated except: pass @@ -171,7 +165,7 @@ def ChatGPT_safe_generate_response_OLD(prompt, func_clean_up=None, verbose=False): if verbose: - print ("CHAT GPT PROMPT") + print ("CHAT GPT PROMPT") # DEBUG: "CHAT GPT PROMPT" print (prompt) for i in range(repeat): @@ -180,31 +174,28 @@ def ChatGPT_safe_generate_response_OLD(prompt, if func_validate(curr_gpt_response, prompt=prompt): return func_clean_up(curr_gpt_response, prompt=prompt) if verbose: - print (f"---- repeat count: {i}") + print (f"---- 重试次数: {i}") # "---- repeat count: " print (curr_gpt_response) - print ("~~~~") + print ("~~~~") # Separator, not translated except: pass - print ("FAIL SAFE TRIGGERED") + print ("FAIL SAFE TRIGGERED") # System status message, kept in English for potential grepping, or "故障安全已触发" return fail_safe_response # ============================================================================ -# ###################[SECTION 2: ORIGINAL GPT-3 STRUCTURE] ################### +# ###################[第二节: 原始 GPT-3 结构] ################### # ============================================================================ def GPT_request(prompt, gpt_parameter): """ - Given a prompt and a dictionary of GPT parameters, make a request to OpenAI - server and returns the response. - ARGS: - prompt: a str prompt - gpt_parameter: a python dictionary with the keys indicating the names of - the parameter and the values indicating the parameter - values. - RETURNS: - a str of GPT-3's response. + 给定一个提示和 GPT 参数字典,向 OpenAI 服务器发出请求并返回响应。 + 参数: + prompt: 一个字符串提示 + gpt_parameter: 一个 Python 字典,其键指示参数名称,值指示参数值。 + 返回: + GPT-3 响应的字符串。 """ temp_sleep() try: @@ -226,17 +217,14 @@ def GPT_request(prompt, gpt_parameter): def generate_prompt(curr_input, prompt_lib_file): """ - Takes in the current input (e.g. comment that you want to classifiy) and - the path to a prompt file. The prompt file contains the raw str prompt that - will be used, which contains the following substr: !! -- this - function replaces this substr with the actual curr_input to produce the - final promopt that will be sent to the GPT3 server. - ARGS: - curr_input: the input we want to feed in (IF THERE ARE MORE THAN ONE - INPUT, THIS CAN BE A LIST.) - prompt_lib_file: the path to the promopt file. - RETURNS: - a str prompt that will be sent to OpenAI's GPT server. + 接收当前输入(例如,您想要分类的评论)和提示文件的路径。 + 提示文件包含将使用的原始字符串提示,其中包含以下子字符串:!! + ——此函数将此子字符串替换为实际的 curr_input,以生成将发送到 GPT-3 服务器的最终提示。 + 参数: + curr_input: 我们要输入的输入(如果多于一个输入,这可以是一个列表。) + prompt_lib_file: 提示文件的路径。 + 返回: + 将发送到 OpenAI GPT 服务器的字符串提示。 """ if type(curr_input) == type("string"): curr_input = [curr_input] @@ -276,7 +264,7 @@ def safe_generate_response(prompt, def get_embedding(text, model="text-embedding-ada-002"): text = text.replace("\n", " ") if not text: - text = "this is blank" + text = "此处为空白" # "this is blank" return openai.Embedding.create( input=[text], model=model)['data'][0]['embedding'] diff --git a/reverie/backend_server/persona/prompt_template/print_prompt.py b/reverie/backend_server/persona/prompt_template/print_prompt.py index 646c0f74af..1de359eb92 100644 --- a/reverie/backend_server/persona/prompt_template/print_prompt.py +++ b/reverie/backend_server/persona/prompt_template/print_prompt.py @@ -1,8 +1,8 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: print_prompt.py -Description: For printing prompts when the setting for verbose is set to True. +文件: print_prompt.py +描述: 用于在详细模式设置为 True 时打印提示信息。 """ import sys sys.path.append('../') @@ -17,7 +17,7 @@ from utils import * ############################################################################## -# PERSONA Chapter 1: Prompt Structures # +# 角色 第1章: 提示结构 # ############################################################################## def print_run_prompts(prompt_template=None, @@ -27,15 +27,15 @@ def print_run_prompts(prompt_template=None, prompt=None, output=None): print (f"=== {prompt_template}") - print ("~~~ persona ---------------------------------------------------") + print ("~~~ 角色 ---------------------------------------------------") print (persona.name, "\n") - print ("~~~ gpt_param ----------------------------------------------------") + print ("~~~ gpt参数 ----------------------------------------------------") print (gpt_param, "\n") - print ("~~~ prompt_input ----------------------------------------------") + print ("~~~ 提示输入 ----------------------------------------------") print (prompt_input, "\n") - print ("~~~ prompt ----------------------------------------------------") + print ("~~~ 提示 ----------------------------------------------------") print (prompt, "\n") - print ("~~~ output ----------------------------------------------------") + print ("~~~ 输出 ----------------------------------------------------") print (output, "\n") - print ("=== END ==========================================================") + print ("=== 结束 ==========================================================") print ("\n\n\n") diff --git a/reverie/backend_server/test.py b/reverie/backend_server/test.py index 41ce26155b..cdaae04273 100644 --- a/reverie/backend_server/test.py +++ b/reverie/backend_server/test.py @@ -1,8 +1,8 @@ """ -Author: Joon Sung Park (joonspk@stanford.edu) +作者: Joon Sung Park (joonspk@stanford.edu) -File: gpt_structure.py -Description: Wrapper functions for calling OpenAI APIs. +文件: gpt_structure.py (注意:实际文件名是 test.py) +描述: 调用 OpenAI API 的封装函数。 """ import json import random @@ -14,17 +14,14 @@ def ChatGPT_request(prompt): """ - Given a prompt and a dictionary of GPT parameters, make a request to OpenAI - server and returns the response. - ARGS: - prompt: a str prompt - gpt_parameter: a python dictionary with the keys indicating the names of - the parameter and the values indicating the parameter - values. - RETURNS: - a str of GPT-3's response. + 给定一个提示和 GPT 参数字典,向 OpenAI 服务器发出请求并返回响应。 + 参数: + prompt: 一个字符串提示 + gpt_parameter: 一个 Python 字典,其键指示参数名称,值指示参数值。 + 返回: + GPT-3 响应的字符串。 """ - # temp_sleep() + # 临时休眠() try: completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", @@ -33,31 +30,31 @@ def ChatGPT_request(prompt): return completion["choices"][0]["message"]["content"] except: - print ("ChatGPT ERROR") - return "ChatGPT ERROR" + print ("ChatGPT ERROR") # Error message, kept in English + return "ChatGPT ERROR" # Error message, kept in English prompt = """ --- -Character 1: Maria Lopez is working on her physics degree and streaming games on Twitch to make some extra money. She visits Hobbs Cafe for studying and eating just about everyday. -Character 2: Klaus Mueller is writing a research paper on the effects of gentrification in low-income communities. +角色1: Maria Lopez 正在攻读物理学位,并在 Twitch 上直播游戏以赚取额外收入。她几乎每天都去 Hobbs Cafe 学习和吃饭。 +角色2: Klaus Mueller 正在撰写一篇关于中产阶级化对低收入社区影响的研究论文。 -Past Context: -138 minutes ago, Maria Lopez and Klaus Mueller were already conversing about conversing about Maria's research paper mentioned by Klaus This context takes place after that conversation. +过去背景: +138 分钟前,Maria Lopez 和 Klaus Mueller 已经在谈论 Klaus 提到的 Maria 的研究论文。此对话发生在那次谈话之后。 -Current Context: Maria Lopez was attending her Physics class (preparing for the next lecture) when Maria Lopez saw Klaus Mueller in the middle of working on his research paper at the library (writing the introduction). -Maria Lopez is thinking of initating a conversation with Klaus Mueller. -Current Location: library in Oak Hill College +当前背景: Maria Lopez 正在上她的物理课(为下一堂课做准备)时,在 Oak Hill College 的图书馆看到 Klaus Mueller 正在写他的研究论文(撰写引言部分)。 +Maria Lopez 正考虑与 Klaus Mueller 发起一次对话。 +当前位置: library in Oak Hill College -(This is what is in Maria Lopez's head: Maria Lopez should remember to follow up with Klaus Mueller about his thoughts on her research paper. Beyond this, Maria Lopez doesn't necessarily know anything more about Klaus Mueller) +(这是 Maria Lopez 心里想的:Maria Lopez 应该记得就 Klaus Mueller 对她研究论文的看法进行后续交流。除此之外,Maria Lopez 不一定更了解 Klaus Mueller) -(This is what is in Klaus Mueller's head: Klaus Mueller should remember to ask Maria Lopez about her research paper, as she found it interesting that he mentioned it. Beyond this, Klaus Mueller doesn't necessarily know anything more about Maria Lopez) +(这是 Klaus Mueller 心里想的:Klaus Mueller 应该记得问 Maria Lopez 关于她的研究论文,因为他提到这篇论文时她觉得很有趣。除此之外,Klaus Mueller 不一定更了解 Maria Lopez) -Here is their conversation. +这是他们的对话。 Maria Lopez: " --- -Output the response to the prompt above in json. The output should be a list of list where the inner lists are in the form of ["", ""]. Output multiple utterances in ther conversation until the conversation comes to a natural conclusion. -Example output json: +以上述提示为准,以 json 格式输出回应。输出应该是一个列表的列表,其中内部列表的形式为 ["<姓名>", "<话语>"]。在对话中输出多轮话语,直到对话自然结束。 +输出 json 示例: {"output": "[["Jane Doe", "Hi!"], ["John Doe", "Hello there!"] ... ]"} """