Skip to content
This repository was archived by the owner on Dec 8, 2023. It is now read-only.

Commit 020ce9a

Browse files
authored
Merge pull request #71 from kabilar/main
Add `black` code formatting
2 parents 2dcde24 + 54e2dc6 commit 020ce9a

25 files changed

+1419
-808
lines changed

notebooks/00-data-download-optional.ipynb

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
"metadata": {},
2828
"outputs": [],
2929
"source": [
30+
"%%bash\n",
3031
"pip install git+https://github.com/datajoint/djarchive-client.git"
3132
]
3233
},
@@ -183,7 +184,7 @@
183184
],
184185
"metadata": {
185186
"jupytext": {
186-
"formats": "ipynb,py"
187+
"formats": "ipynb,py_scripts//py"
187188
},
188189
"kernelspec": {
189190
"display_name": "ephys_workflow_runner",

notebooks/py_scripts/00-data-download-optional.py

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,12 @@
11
# ---
22
# jupyter:
33
# jupytext:
4-
# formats: ipynb,py
4+
# formats: ipynb,py_scripts//py
55
# text_representation:
66
# extension: .py
77
# format_name: light
88
# format_version: '1.5'
9-
# jupytext_version: 1.13.7
9+
# jupytext_version: 1.14.0
1010
# kernelspec:
1111
# display_name: ephys_workflow_runner
1212
# language: python
@@ -19,12 +19,15 @@
1919

2020
# The example dataset was hosted on djarchive, an AWS storage. We provide a client package, [djarchive-client](https://github.com/datajoint/djarchive-client), to download the data, which can be installed with pip:
2121

22-
pip install git+https://github.com/datajoint/djarchive-client.git
22+
# + language="bash"
23+
# pip install git+https://github.com/datajoint/djarchive-client.git
24+
# -
2325

2426
# ## Download ephys test datasets using `djarchive-client`
2527

2628
import os
2729
import djarchive_client
30+
2831
client = djarchive_client.client()
2932

3033
# To browse the datasets that are available in djarchive:
@@ -37,17 +40,22 @@
3740

3841
# To download the dataset, let's prepare a root directory, for example in `/tmp`:
3942

40-
os.mkdir('/tmp/test_data')
43+
os.mkdir("/tmp/test_data")
4144

4245
# Get the dataset revision with the current version of the workflow:
4346

4447
from workflow_array_ephys import version
45-
revision = version.__version__.replace('.', '_')
48+
49+
revision = version.__version__.replace(".", "_")
4650
revision
4751

4852
# Then run download for a given set and the revision:
4953

50-
client.download('workflow-array-ephys-test-set', target_directory='/tmp/test_data', revision=revision)
54+
client.download(
55+
"workflow-array-ephys-test-set",
56+
target_directory="/tmp/test_data",
57+
revision=revision,
58+
)
5159

5260
# ## Directory organization
5361
# After downloading, the directory will be organized as follows:

notebooks/py_scripts/01-configure.py

Lines changed: 15 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -26,20 +26,24 @@
2626
# -
2727

2828
import os
29+
2930
# change to the upper level folder
30-
if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')
31-
assert os.path.basename(os.getcwd())=='workflow-array-ephys', ("Please move to the "
32-
+ "workflow directory")
31+
if os.path.basename(os.getcwd()) == "notebooks":
32+
os.chdir("..")
33+
assert os.path.basename(os.getcwd()) == "workflow-array-ephys", (
34+
"Please move to the " + "workflow directory"
35+
)
3336
import datajoint as dj
3437

3538
# ## Setup - Credentials
3639
#
3740
# Now let's set up the host, user and password in the `dj.config` global variable
3841

3942
import getpass
40-
dj.config['database.host'] = '{YOUR_HOST}'
41-
dj.config['database.user'] = '{YOUR_USERNAME}'
42-
dj.config['database.password'] = getpass.getpass() # enter the password securely
43+
44+
dj.config["database.host"] = "{YOUR_HOST}"
45+
dj.config["database.user"] = "{YOUR_USERNAME}"
46+
dj.config["database.password"] = getpass.getpass()  # enter the password securely
4347

4448
# You should be able to connect to the database at this stage.
4549

@@ -55,11 +59,11 @@
5559
#
5660
# The prefix can be configured as follows in `dj.config`:
5761

58-
dj.config['custom'] = {'database.prefix': 'neuro_'}
62+
dj.config["custom"] = {"database.prefix": "neuro_"}
5963

6064
# ### Root directories for raw/processed data
6165
#
62-
# `ephys_root_data_dir` field indicates the root directory for
66+
# `ephys_root_data_dir` field indicates the root directory for
6367
# + The **ephys raw data** from SpikeGLX or OpenEphys, including `*{.ap,lf}.{bin,meta}`
6468
# + The **clustering results** from kilosort2 (e.g. `spike_{times,clusters}.npy`
6569
#
@@ -77,9 +81,9 @@
7781
# ```
7882

7983
# If there is only one root path.
80-
dj.config['custom']['ephys_root_data_dir'] = '/tmp/test_data'
84+
dj.config["custom"]["ephys_root_data_dir"] = "/tmp/test_data"
8185
# If there are multiple possible root paths:
82-
dj.config['custom']['ephys_root_data_dir'] = ['/tmp/test_data1', '/tmp/test_data2']
86+
dj.config["custom"]["ephys_root_data_dir"] = ["/tmp/test_data1", "/tmp/test_data2"]
8387

8488
dj.config
8589

@@ -92,7 +96,7 @@
9296
#
9397
# `element-array-ephys` offers 3 different schemas: `acute`, `chronic`, and `no-curation`. For more information about each, please visit the [electrophysiology description page](https://elements.datajoint.org/description/array_ephys/). This decision should be made before first activating the schema. Note: only `no-curation` is supported for export to NWB directly from the Element.
9498

95-
dj.config['custom']['ephys_mode']='no-curation' # or acute or chronic
99+
dj.config["custom"]["ephys_mode"] = "no-curation" # or acute or chronic
96100

97101
# ## Save configuration
98102
#

notebooks/py_scripts/02-workflow-structure-optional.py

Lines changed: 14 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,9 @@
2424
# To load the local configuration, we will change the directory to the package root.
2525

2626
import os
27-
if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')
27+
28+
if os.path.basename(os.getcwd()) == "notebooks":
29+
os.chdir("..")
2830

2931
# ## Schemas and tables
3032

@@ -57,24 +59,24 @@
5759
# + `dj.list_schemas()`: list all schemas a user could access.
5860
dj.list_schemas()
5961

60-
# + `dj.Diagram()`: plot tables and dependencies.
62+
# + `dj.Diagram()`: plot tables and dependencies.
6163

6264
# + `dj.Diagram()`: plot tables and dependencies
6365
# plot diagram for all tables in a schema
6466
dj.Diagram(ephys)
6567
# -
6668

67-
# **Table tiers**:
69+
# **Table tiers**:
6870
#
69-
# Manual table: green box, manually inserted table, expect new entries daily, e.g. Subject, ProbeInsertion.
70-
# Lookup table: gray box, pre inserted table, commonly used for general facts or parameters. e.g. Strain, ClusteringMethod, ClusteringParamSet.
71-
# Imported table: blue oval, auto-processing table, the processing depends on the importing of external files. e.g. process of Clustering requires output files from kilosort2.
72-
# Computed table: red circle, auto-processing table, the processing does not depend on files external to the database, commonly used for
71+
# Manual table: green box, manually inserted table, expect new entries daily, e.g. Subject, ProbeInsertion.
72+
# Lookup table: gray box, pre inserted table, commonly used for general facts or parameters. e.g. Strain, ClusteringMethod, ClusteringParamSet.
73+
# Imported table: blue oval, auto-processing table, the processing depends on the importing of external files. e.g. process of Clustering requires output files from kilosort2.
74+
# Computed table: red circle, auto-processing table, the processing does not depend on files external to the database, commonly used for
7375
# Part table: plain text, as an appendix to the master table, all the part entries of a given master entry represent an intact set of the master entry. e.g. Unit of a CuratedClustering.
7476
#
75-
# **Dependencies**:
77+
# **Dependencies**:
7678
#
77-
# One-to-one primary: thick solid line, share the exact same primary key, meaning the child table inherits all the primary key fields from the parent table as its own primary key.
79+
# One-to-one primary: thick solid line, share the exact same primary key, meaning the child table inherits all the primary key fields from the parent table as its own primary key.
7880
# One-to-many primary: thin solid line, inherit the primary key from the parent table, but have additional field(s) as part of the primary key as well
7981
# secondary dependency: dashed line, the child table inherits the primary key fields from parent table as its own secondary attribute.
8082

@@ -95,7 +97,7 @@
9597
# + `heading`: [markdown]
9698
# # + `describe()`: show table definition with foreign key references.
9799
# -
98-
ephys.EphysRecording.describe();
100+
ephys.EphysRecording.describe()
99101

100102
# + `heading`: show attribute definitions regardless of foreign key references
101103

@@ -115,14 +117,14 @@
115117
dj.Diagram(subject)
116118

117119
# + [subject](https://github.com/datajoint/element-animal): contains the basic information of subject, including Strain, Line, Subject, Zygosity, and SubjectDeath information.
118-
subject.Subject.describe();
120+
subject.Subject.describe()
119121

120122
# + [`session`](https://github.com/datajoint/element-session): General information of experimental sessions.
121123

122124
dj.Diagram(session)
123125

124126
# + [session](https://github.com/datajoint/element-session): experimental session information
125-
session.Session.describe();
127+
session.Session.describe()
126128

127129
# + [`ephys`](https://github.com/datajoint/element-array-ephys): Neuropixel based probe and ephys information
128130

0 commit comments

Comments
 (0)