diff --git a/app/models/job/JobService.scala b/app/models/job/JobService.scala
index 4f24cda0e87..2d2d0608b47 100644
--- a/app/models/job/JobService.scala
+++ b/app/models/job/JobService.scala
@@ -261,10 +261,14 @@ class JobService @Inject()(wkConf: WkConf,
 
   private def getJobCostPerGVx(jobCommand: JobCommand): Fox[BigDecimal] =
     jobCommand match {
-      case JobCommand.infer_neurons      => Fox.successful(wkConf.Features.neuronInferralCostPerGVx)
-      case JobCommand.infer_mitochondria => Fox.successful(wkConf.Features.mitochondriaInferralCostPerGVx)
-      case JobCommand.align_sections     => Fox.successful(wkConf.Features.alignmentCostPerGVx)
-      case _                             => Fox.failure(s"Unsupported job command $jobCommand")
+      case JobCommand.infer_neurons        => Fox.successful(wkConf.Features.neuronInferralCostPerGVx)
+      case JobCommand.infer_nuclei         => Fox.successful(wkConf.Features.mitochondriaInferralCostPerGVx)
+      case JobCommand.infer_mitochondria   => Fox.successful(wkConf.Features.mitochondriaInferralCostPerGVx)
+      case JobCommand.infer_instances      => Fox.successful(wkConf.Features.mitochondriaInferralCostPerGVx)
+      case JobCommand.train_neuron_model   => Fox.successful(BigDecimal(0))
+      case JobCommand.train_instance_model => Fox.successful(BigDecimal(0))
+      case JobCommand.align_sections       => Fox.successful(wkConf.Features.alignmentCostPerGVx)
+      case _                               => Fox.failure(s"Unsupported job command $jobCommand")
     }
 
   def calculateJobCostInCredits(boundingBoxInTargetMag: BoundingBox, jobCommand: JobCommand): Fox[BigDecimal] =
diff --git a/docs/automation/ai_segmentation.md b/docs/automation/ai_segmentation.md
index 5e71973c595..08c8ae87b17 100644
--- a/docs/automation/ai_segmentation.md
+++ b/docs/automation/ai_segmentation.md
@@ -4,40 +4,50 @@ While WEBKNOSSOS is great for manual annotation, some datasets are either too bi
 The automated analysis features are designed to provide a general solution to a wide range of (EM) datasets. Since datasets differ in staining protocols, imaging modalities, imaging resolution & fidelity, your results may vary. [Please contact us](mailto:hello@webknossos.org) for customized, fine-tuned solutions for your dataset.
 
-We plan to add more automated analysis features in the future. If you want to work with us on an automated analysis project, [please contact us](mailto:hello@webknossos.org).
-We would love to integrate analysis solutions for more modalities and use cases.
+You can launch the AI analysis dialog using the `AI Analysis` button in the toolbar at the top. This will open a dropdown menu with three options:
 
-!!!info
-    Automated analysis is only available on [webknossos.org](https://webknossos.org) at the moment.
-    If you want to set up on-premise automated analysis at your institute/workplace, then [please contact sales](mailto:sales@webknossos.org).
+- **Run AI model:** Run pre-trained or custom AI models on your dataset.
+- **Train AI model:** Train your own AI models on your WEBKNOSSOS annotations to match your specific needs. Coming soon.
+- **AI Alignment:** Align datasets. Read more about [image alignment](./alignment.md).
+
+
+## Pre-trained Models
+
+WEBKNOSSOS offers several pre-trained models to get you started quickly:
+
+* **Neuron Segmentation:** This analysis is designed to work with serial electron microscopy data of neural tissue (brain/cortex) and will segment all neurons within the dataset. It is tuned for serial block-face electron microscopy (SBEM), MultiSEM and focused ion beam-SEM (FIB-SEM) data.
+* **Mitochondria Detection:** Run a pre-trained instance segmentation model for mitochondria detection. Optimized for EM data.
+* **Nuclei Detection:** (Coming Soon) Run a pre-trained instance segmentation model for nuclei detection. Optimized for EM data.
+
+## Your Custom Models
+
+This section will list any custom models that you have trained or uploaded to your organization. While these build on the foundation of our pre-trained models, you can customize your models to identify or segment biological structures of interest or fine-tune models to the contrast and staining of your images. Training will be enabled soon.
 
-## Neuron Segmentation
-As a first trial, WEBKNOSSOS includes neuron segmentation. This analysis is designed to work with serial block-face electron microscopy (SBEM) data of neural tissue (brain/cortex) and will segment all neurons within the dataset.
+## Analysis Settings
 
-You can launch the AI analysis dialog using the `AI Analysis` button in the toolbar at the top. Use the `Start AI neuron segmentation` button in the dialog to start the analysis.
+Before starting the analysis, you need to configure the following settings:
 
-![Neuron segmentations can be launched from the tool bar.](../images/process_dataset.jpg)
-/// caption
-Neuron segmentations can be launched from the tool bar.
-///
+* **New Dataset Name:** The name of the new dataset that will be created with the segmentation results.
+* **Image Data Layer:** The image layer from your current dataset that the model will use for analysis.
+* **Bounding Box:** The region of interest that you want to analyze. You can choose to analyze the full dataset or a specific bounding box that you have created.
 
-Computation time for this analysis depends directly on the size of your dataset.
+You can also access **Advanced Settings** to further customize the analysis.
+
+## Credit Information
+
+This section provides an overview of your available credits in your organization and the estimated cost for the analysis. Cost varies depending on the size of your dataset and the type of model you want to run.
+
+---
+
+Computation time for any analysis depends directly on the size of your dataset.
 Expect a few hours for medium-sized volumetric EM datasets. The finished analysis will be available as a new dataset from your dashboard.
 You can monitor the status and progress of the analysis job from the [`Processing Jobs` page](./jobs.md) or wait for the email notification.
 
-![Starting a new neuron segmentation.](../images/neuron_segmentation_start.jpeg)
-/// caption
-Starting a new neuron segmentation.
-///
-![Monitor the segmentation progress from the Jobs page.](../images/nuclei_segmentation_job.jpeg)
-/// caption
-Monitor the segmentation progress from the Jobs page.
-///
-## Mitochondria detection
-Similarly to the neuron segmentation, you can now start a mitochondria detection in WEBKNOSSOS. In the AI analysis dialog, choose `Mitochondria detection`, select a bounding box, and go to `Processing Jobs` to look at the result.
-
-## Custom Analysis
-You'll soon be able to train custom classifiers directly in WEBKNOSSOS. [Contact us](mailto:hello@webknossos.org) for an invite to join the beta program.
+We plan to add more AI analysis features in the future. If you want to work with us on an automated analysis project, [please contact us](mailto:hello@webknossos.org).
+We would love to integrate analysis solutions for more modalities and use cases.
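To make the credit estimate concrete, here is a minimal TypeScript sketch of how a cost in credits can be derived from a bounding box and a per-gigavoxel price, mirroring the `getJobCostPerGVx` mapping in the Scala change at the top of this diff. The prices below are made-up placeholders, not the actual webknossos.org rates, and the real calculation lives in `JobService.calculateJobCostInCredits` on the server.

```typescript
// Hypothetical sketch: estimate a job's cost in credits from its bounding box.
// The per-GVx prices are placeholders; the real values come from the server
// configuration (wkConf.Features.*CostPerGVx), and training jobs cost 0 per GVx.
type JobCommand =
  | "infer_neurons"
  | "infer_nuclei"
  | "infer_mitochondria"
  | "infer_instances"
  | "align_sections";

const ASSUMED_COST_PER_GVX: Record<JobCommand, number> = {
  infer_neurons: 1.0, // placeholder
  infer_nuclei: 0.5, // placeholder
  infer_mitochondria: 0.5, // placeholder
  infer_instances: 0.5, // placeholder
  align_sections: 0.1, // placeholder
};

// Bounding box extent in voxels of the target magnification.
function estimateCostInCredits(sizeInVoxels: [number, number, number], jobCommand: JobCommand): number {
  const [width, height, depth] = sizeInVoxels;
  const gigaVoxels = (width * height * depth) / 1e9; // 1 GVx = 10^9 voxels
  return gigaVoxels * ASSUMED_COST_PER_GVX[jobCommand];
}

// Example: a 2048 x 2048 x 1024 voxel bounding box for neuron inference.
console.log(estimateCostInCredits([2048, 2048, 1024], "infer_neurons").toFixed(2));
```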
-If you are interested in specialized, automated analysis, image segmentation, object detection etc. then feel free to [contact us](mailto:hello@webknossos.org). The WEBKNOSSOS development teams offers [commercial analysis services](https://webknossos.org/services/automated-segmentation) for that.
\ No newline at end of file
+
+!!! info
+    Automated analysis is only available on [webknossos.org](https://webknossos.org) at the moment.
+    If you want to set up on-premise automated analysis at your institute/workplace, then [please contact sales](mailto:sales@webknossos.org).
diff --git a/docs/automation/ai_training.md b/docs/automation/ai_training.md
new file mode 100644
index 00000000000..a41bb85ce62
--- /dev/null
+++ b/docs/automation/ai_training.md
@@ -0,0 +1,58 @@
+# AI Model Training
+
+WEBKNOSSOS allows you to train your own AI models for image segmentation. This feature is currently in early access.
+
+
+!!! info
+    AI Model Training is only available on [webknossos.org](https://webknossos.org) at the moment.
+    If you want to set up on-premise automated analysis at your institute/workplace, then [please contact sales](mailto:sales@webknossos.org).
+
+
+
+## Preparing Your Annotations
+
+Before you can start training, prepare your ground truth annotation(s). The training process requires at least one volume annotation with at least one bounding box.
+
+Here is a step-by-step guide to prepare your data:
+
+1. **Create an annotation:** Start by creating a new annotation or opening an existing one.
+2. **Define bounding boxes:** Create one or more bounding boxes that define the areas you want to use for training.
+    - It is important that the bounding boxes are not too small. WEBKNOSSOS checks that each bounding box has a minimum extent of **32 voxels in each dimension**.
+    - Bounding boxes that are not aligned with the selected magnification will be automatically shrunk to fit.
+    - For optimal training, all bounding boxes should have dimensions that are multiples of the smallest box dimensions.
+3. **Label segments:** Within your bounding boxes, label the segmentation of your structures of interest. Use the volume annotation tool to manually annotate structures. This will be your ground truth data. For neurons, we recommend densely labeling each structure with a unique ID. For instance segmentation, you only need to label the structures you want to train on, e.g. nuclei, mitochondria, soma, vesicles, etc.
+
+## Configuring the Training
+
+To start a training job, click on the `AI Analysis` button in the toolbar and select `Train AI model` from the dropdown menu.
+This will open a dialog where you can configure and start your training job.
+
+### Select AI Training Task
+
+First, you need to select the type of model you want to train. Both models are optimized for SEM, FIB-SEM, SBEM, and Multi-SEM microscopes:
+
+* **EM Neuron Model:** Train a new AI model for dense EM neuron segmentation.
+* **EM Instances Model:** Train a new AI model for EM instance segmentation. This is optimized for nuclei, mitochondria, and other cell types.
+
+### Training Data
+
+In this section, you need to specify the data that will be used for training.
+
+* **Image Data Layer:** Select the raw image layer.
+* **Ground Truth Layer:** Select the segmentation layer that you created.
+* **Magnification:** Choose the magnification that should be used for training.
+
+You can also add more training data from other annotations by clicking the `+` button and referencing annotations by ID or WEBKNOSSOS URLs.
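The bounding-box rules listed under "Preparing Your Annotations" above (minimum extent of 32 voxels per dimension, automatic shrinking to the selected magnification, multiples of the smallest box) can be illustrated with a short sketch. This is a simplified TypeScript reading of those rules, not the validation code WEBKNOSSOS actually runs; all function names are hypothetical.

```typescript
// Hypothetical sketch of the training bounding-box checks described above.
type Vec3 = [number, number, number];

const MIN_EXTENT = 32; // minimum size in voxels per dimension

// Shrink a box extent so it is aligned with the chosen magnification, e.g. mag [2, 2, 1].
function shrinkToMag(extent: Vec3, mag: Vec3): Vec3 {
  return extent.map((e, i) => Math.floor(e / mag[i]) * mag[i]) as Vec3;
}

function isLargeEnough(extent: Vec3): boolean {
  return extent.every((e) => e >= MIN_EXTENT);
}

// "Multiples of the smallest box dimensions": compare each box against the smallest one.
function isMultipleOfSmallest(extent: Vec3, smallest: Vec3): boolean {
  return extent.every((e, i) => e % smallest[i] === 0);
}

const boxes: Vec3[] = [
  [64, 64, 64],
  [128, 128, 64],
  [20, 64, 64], // too small along x
];
const aligned = boxes.map((box) => shrinkToMag(box, [1, 1, 1]));
const smallest = aligned.reduce((a, b) => (a[0] * a[1] * a[2] <= b[0] * b[1] * b[2] ? a : b));
for (const box of aligned) {
  console.log(box, isLargeEnough(box), isMultipleOfSmallest(box, smallest));
}
```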
+
+### Training Settings
+
+* **Model Name:** Give your new model a descriptive name.
+* **Comments:** Add any comments or notes about the training for future reference.
+* **Max Distance (nm):** (Only for EM Instances Model) The maximum cross-section length ("diameter") for each identified object in nanometers, e.g. nuclei: 1000 nm, vesicles: 80 nm.
+
+### Credit Information
+
+This section provides an overview of your available credits in your organization and the estimated cost for the training. Cost varies depending on the size of your dataset and the type of model you want to train.
+
+## Launching the Training
+
+After configuring everything, you can start the training by clicking the `Start Training` button. You can monitor the progress of your training job from the [`Processing Jobs` page](./jobs.md) or wait for the email notification. Training might take a few hours depending on the size of your dataset.
+
+Once the training is finished, you can find an overview of all your trained models on the `Admin` > `AI Models` page in the navbar. Please refer to the [AI Segmentation](./ai_segmentation.md) guide for more information on how to apply your trained models to your dataset.
diff --git a/docs/automation/alignment.md b/docs/automation/alignment.md
index 02e4dc002b1..3fe1f9e7c80 100644
--- a/docs/automation/alignment.md
+++ b/docs/automation/alignment.md
@@ -1,8 +1,24 @@
-# Alignment
+# Image Alignment
 
 For single-tile image stacks, an alignment is directly possible from within WEBKNOSSOS.
-Simply upload the dataset, open it and select the "Alignment" tab in the AI Analysis dialog.
+Simply upload the dataset, open it and select the "AI Analysis" button in the toolbar. From the dropdown, choose "Run AI Alignment".
 
-You can even annotate landmarks with the skeleton tool and use that to let WEBKNOSSOS align the dataset. Often these landmarks are not necessary, but for particularly hard to align sections, they can be quite useful. When manual landmarks are used, they don't need to cover the entire dataset.
+This will open a dialog where you can configure and start the alignment.
+
+## Select AI Alignment Task
+
+* **Align Sections:** Align all sections of this dataset along the Z-axis using features in neighboring sections. This only supports datasets with a single tile per section (no in-plane stitching needed).
+* **Align & stitch multiple tiles:** (Coming Soon) For stitching and aligning datasets with multiple tiles per section, please contact us via email for a quote. [Learn more about our alignment services.](https://webknossos.org/services/alignment)
+
+## Alignment Settings
+
+* **New Dataset Name:** The name of the new dataset that will be created with the aligned images.
+* **Manual Matches:** You can use manual matches from a skeleton annotation to help the alignment process in specific spots. This can be useful for particularly hard-to-align sections. When manual landmarks are used, they don't need to cover the entire dataset. Instead, these manual landmarks can be placed to help with tricky cases such as a big gap, tear or jump between two sections.
+
+## Credit Information
+
+This section provides an overview of your available credits in your organization and the estimated cost for the alignment. Cost varies depending on the size of your dataset.
+
+Computation time for the alignment depends directly on the size of your dataset. The finished analysis will be available as a new dataset from your dashboard.
 You can monitor the status and progress of the analysis job from the [`Processing Jobs` page](./jobs.md) or wait for the email notification.
 For multi-tile image stacks, please refer to our [Alignment services](https://webknossos.org/services/alignment).
diff --git a/docs/automation/jobs.md b/docs/automation/jobs.md
index c378e709656..4ac940ff675 100644
--- a/docs/automation/jobs.md
+++ b/docs/automation/jobs.md
@@ -6,7 +6,7 @@ WEBKNOSSOS will notify you via email upon completion or failure of any job.
 
 Example workflows:
 
-- [AI segmentation](./ai_segmentation.md), e.g., nuclei inferral
+- [AI segmentation](./ai_segmentation.md), e.g. running an AI model for nuclei segmentation and mitochondria detection
 - [converting datasets on upload](../data/upload_ui.md)
 - [mesh file (pre)-computation](../meshes/precomputed_meshes.md)
 - [applying a merger mode annotation](../proofreading/merger_mode.md)
diff --git a/docs/data/wkw.md b/docs/data/wkw.md
index 408b1379260..7e68e2d0f8c 100644
--- a/docs/data/wkw.md
+++ b/docs/data/wkw.md
@@ -56,7 +56,7 @@ Volume annotations can be downloaded and imported using ZIP files that contain [
 The ZIP archive contains one NML file that holds meta information including the dataset name and the user's position.
 Additionally, there is another embedded ZIP file that contains the volume annotations in WKW file format.
 
-!!!info
+!!! info
    In contrast to on-disk WKW datasets, the WKW files in downloaded volume annotations only contain a single 32^3 bucket in each file.
    Therefore, also the addressing of the WKW files (e.g. `z48/y5444/x5748.wkw`) is in steps of 32 instead of 1024.
diff --git a/docs/proofreading/proofreading_tool.md b/docs/proofreading/proofreading_tool.md
index 84479dd805b..7ea60735d9d 100644
--- a/docs/proofreading/proofreading_tool.md
+++ b/docs/proofreading/proofreading_tool.md
@@ -31,5 +31,5 @@ If case you want to reload, hide or remove a 3D mesh during proofreading, you ca
 
 In addition to the handy shortcuts available from the right-click context menu, users can also directly modify the supervoxel graph like any other skeleton to manually add/remove nodes and edges for fine-grained control.
 
-!!!info
+!!! info
    The proofreading tool requires a supervoxel graph representation of a segmentation to work. At this time, these can only be obtained from the [Voxelytics AI segmentation pipeline](https://voxelytics.com). We are actively working to make this available for more users, so please reach out to us to get you started and provide feedback: [hello@webknossos.org](mailto:hello@webknossos.org)
diff --git a/docs/sharing/dataset_sharing.md b/docs/sharing/dataset_sharing.md
index 5591cce93e6..f932286fd41 100644
--- a/docs/sharing/dataset_sharing.md
+++ b/docs/sharing/dataset_sharing.md
@@ -66,5 +66,5 @@ To share a dataset publicly, follow these steps:
 
    It is recommended to add a description before sharing a dataset publicly.
 ///
 
-!!!info
+!!! info
    We recommend giving your datasets a meaningful name and description. Both are featured next to the data viewport in the `Info` tab in the main WEBKNOSSOS UI.
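As an aside to the WKW note above: because each file in a downloaded volume annotation holds a single 32³ bucket, file addressing steps by 32. Here is a minimal TypeScript sketch, assuming the path components are simply the voxel coordinates integer-divided by 32 (for on-disk datasets the step would be 1024 instead); the actual reading and writing is handled by the WKW libraries.

```typescript
// Sketch: map a global voxel position to the WKW file that would contain it in a
// downloaded volume annotation (one 32^3 bucket per file, addressing in steps of 32).
const BUCKET_EDGE = 32; // assumed bucket edge length in voxels

function wkwPathForVoxel(x: number, y: number, z: number): string {
  const xi = Math.floor(x / BUCKET_EDGE);
  const yi = Math.floor(y / BUCKET_EDGE);
  const zi = Math.floor(z / BUCKET_EDGE);
  return `z${zi}/y${yi}/x${xi}.wkw`;
}

// Under this scheme, a voxel at (183940, 174215, 1545) lands in z48/y5444/x5748.wkw.
console.log(wkwPathForVoxel(183940, 174215, 1545));
```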
\ No newline at end of file diff --git a/frontend/javascripts/admin/api/jobs.ts b/frontend/javascripts/admin/api/jobs.ts index 05b62541f41..21f180b125c 100644 --- a/frontend/javascripts/admin/api/jobs.ts +++ b/frontend/javascripts/admin/api/jobs.ts @@ -11,7 +11,7 @@ import type { RenderAnimationOptions, } from "types/api_types"; import type { UnitLong, Vector3, Vector6 } from "viewer/constants"; -import type { SplitMergerEvaluationSettings } from "viewer/view/action-bar/ai_job_modals/components/collapsible_split_merger_evaluation_settings"; +import type { SplitMergerEvaluationSettings } from "viewer/view/ai_jobs/components/collapsible_split_merger_evaluation_settings"; import { assertResponseLimit } from "./api_utils"; function transformBackendJobToAPIJob(job: any): APIJob { @@ -188,7 +188,7 @@ export function startComputeSegmentIndexFileJob( }); } -export function startNucleiInferralJob( +export function runPretrainedNucleiInferenceJob( datasetId: string, layerName: string, newDatasetName: string, @@ -204,7 +204,7 @@ export function startNucleiInferralJob( }); } -export function startNeuronInferralJob( +export function runPretrainedNeuronInferencelJob( datasetId: string, layerName: string, bbox: Vector6, @@ -322,7 +322,7 @@ export function startMaterializingVolumeAnnotationJob( ); } -export function startMitochondriaInferralJob( +export function runPretrainedMitochondriaInferenceJob( datasetId: string, layerName: string, bbox: Vector6, @@ -368,7 +368,7 @@ export enum APIAiModelCategory { EM_NUCLEI = "em_nuclei", } -type AiModelTrainingAnnotationSpecification = { +export type AiModelTrainingAnnotationSpecification = { annotationId: string; colorLayerName: string; segmentationLayerName: string; @@ -406,7 +406,7 @@ export function runInstanceModelTraining(params: RunInstanceModelTrainingParamet }); } -export type BaseModelInferenceParameters = { +export type BaseCustomModelInferenceParameters = { annotationId?: string; aiModelId: string; datasetDirectoryName: string; @@ -418,21 +418,21 @@ export type BaseModelInferenceParameters = { invertColorLayer: boolean; // maskAnnotationLayerName?: string | null }; -type RunNeuronModelInferenceParameters = BaseModelInferenceParameters; +type RunCustomNeuronModelInferenceParameters = BaseCustomModelInferenceParameters; -type RunInstanceModelInferenceParameters = BaseModelInferenceParameters & { +type RunCustomInstanceModelInferenceParameters = BaseCustomModelInferenceParameters & { seedGeneratorDistanceThreshold: number; }; -export function runNeuronModelInferenceWithAiModelJob(params: RunNeuronModelInferenceParameters) { +export function runCustomNeuronModelInferenceJob(params: RunCustomNeuronModelInferenceParameters) { return Request.sendJSONReceiveJSON("/api/aiModels/inferences/runCustomNeuronModelInference", { method: "POST", data: JSON.stringify({ ...params, boundingBox: params.boundingBox.join(",") }), }); } -export function runInstanceModelInferenceWithAiModelJob( - params: RunInstanceModelInferenceParameters, +export function runCustomInstanceModelInferenceJob( + params: RunCustomInstanceModelInferenceParameters, ) { return Request.sendJSONReceiveJSON("/api/aiModels/inferences/runCustomInstanceModelInference", { method: "POST", diff --git a/frontend/javascripts/admin/voxelytics/ai_model_list_view.tsx b/frontend/javascripts/admin/voxelytics/ai_model_list_view.tsx index dafd7fc4a35..5fac48363f6 100644 --- a/frontend/javascripts/admin/voxelytics/ai_model_list_view.tsx +++ b/frontend/javascripts/admin/voxelytics/ai_model_list_view.tsx @@ -1,13 +1,7 
@@ -import { - EyeOutlined, - FileTextOutlined, - PlusOutlined, - SyncOutlined, - TeamOutlined, -} from "@ant-design/icons"; +import { EyeOutlined, FileTextOutlined, SyncOutlined, TeamOutlined } from "@ant-design/icons"; import { JobState, getShowTrainingDataLink } from "admin/job/job_list_view"; import { getAiModels, getUsersOrganizations, updateAiModel } from "admin/rest_api"; -import { Button, Col, Modal, Row, Select, Space, Table, Typography } from "antd"; +import { Button, Col, Modal, Row, Select, Table, Typography } from "antd"; import FormattedDate from "components/formatted_date"; import { PageNotAvailableToNormalUser } from "components/permission_enforcer"; import { useFetch, useGuardedFetch } from "libs/react_helpers"; @@ -16,19 +10,14 @@ import Toast from "libs/toast"; import _ from "lodash"; import { useState } from "react"; import type { Key } from "react"; -import type { Vector3 } from "viewer/constants"; -import { getMagInfo, getSegmentationLayerByName } from "viewer/model/accessors/dataset_accessor"; import { formatUserName } from "viewer/model/accessors/user_accessor"; -import { TrainAiModelForm } from "viewer/view/action-bar/ai_job_modals/forms/train_ai_model_form"; -import type { AnnotationInfoForAITrainingJob } from "viewer/view/action-bar/ai_job_modals/utils"; import { Link } from "react-router-dom"; -import type { APIAnnotation, AiModel } from "types/api_types"; +import type { AiModel } from "types/api_types"; export default function AiModelListView() { const activeUser = useWkSelector((state) => state.activeUser); const [refreshCounter, setRefreshCounter] = useState(0); - const [isTrainModalVisible, setIsTrainModalVisible] = useState(false); const [currentlyEditedModel, setCurrentlyEditedModel] = useState(null); const [aiModels, isLoading] = useGuardedFetch( getAiModels, @@ -43,9 +32,6 @@ export default function AiModelListView() { return (
- {isTrainModalVisible ? ( - setIsTrainModalVisible(false)} /> - ) : null} {currentlyEditedModel ? ( ) : null}
- - - - +

AI Models

void }) { - const [annotationInfosForAiJob, setAnnotationInfosForAiJob] = useState< - AnnotationInfoForAITrainingJob[] - >([]); - - const getMagsForSegmentationLayer = (annotationId: string, layerName: string) => { - // The layer name is a human-readable one. It can either belong to an annotationLayer - // (therefore, also to a volume tracing) or to the actual dataset. - // Both are checked below. This won't be ambiguous because annotationLayers must not - // have names that dataset layers already have. - - const annotationWithDataset = annotationInfosForAiJob.find(({ annotation }) => { - return annotation.id === annotationId; - }); - if (annotationWithDataset == null) { - throw new Error("Cannot find annotation for specified id."); - } - - const { annotation, dataset, volumeTracings, volumeTracingMags } = annotationWithDataset; - - let annotationLayer = annotation.annotationLayers.find((l) => l.name === layerName); - if (annotationLayer != null) { - const volumeTracingIndex = volumeTracings.findIndex( - (tracing) => tracing.tracingId === annotationLayer.tracingId, - ); - const mags = volumeTracingMags[volumeTracingIndex] || [{ mag: [1, 1, 1] as Vector3 }]; - return getMagInfo(mags); - } else { - const segmentationLayer = getSegmentationLayerByName(dataset, layerName); - return getMagInfo(segmentationLayer.mags); - } - }; - - return ( - - - AI Analysis - - } - onCancel={onClose} - footer={null} - maskClosable={false} - > - { - setAnnotationInfosForAiJob([...annotationInfosForAiJob, ...newItems]); - }} - /> - - ); -} - const renderActionsForModel = (model: AiModel, onChangeSharedOrganizations: () => void) => { const organizationSharingButton = model.isOwnedByUsersOrganization ? ( diff --git a/frontend/javascripts/components/stacked_bar_chart.tsx b/frontend/javascripts/components/stacked_bar_chart.tsx index d27ecb4dfc4..f89b5288317 100644 --- a/frontend/javascripts/components/stacked_bar_chart.tsx +++ b/frontend/javascripts/components/stacked_bar_chart.tsx @@ -1,10 +1,12 @@ -import _ from "lodash"; +import sum from "lodash/sum"; + export const colors = { finished: "#52c41a", active: "#1890ff", open: "rgb(255, 85, 0)", }; const indexToType = ["finished", "active", "open"]; + export default function StackedBarChart({ a, b, c }: { a: number; b: number; c: number }) { const total = a + b + c; const percentages = [a, b, c].map((el) => Math.ceil((el / total) * 100)); @@ -15,7 +17,7 @@ export default function StackedBarChart({ a, b, c }: { a: number; b: number; c: p === 0 ? 
0 : Math.max(minPercentage, p * bufferFactor), ); - const upscaleFactor = 100 / _.sum(renderedPercentages); + const upscaleFactor = 100 / sum(renderedPercentages); renderedPercentages = renderedPercentages.map((p) => p * upscaleFactor); return ( diff --git a/frontend/javascripts/libs/toast.tsx b/frontend/javascripts/libs/toast.tsx index dc6d793a77a..ab84b2f6c00 100644 --- a/frontend/javascripts/libs/toast.tsx +++ b/frontend/javascripts/libs/toast.tsx @@ -96,8 +96,8 @@ const Toast = { , ): BoundingBoxMinMaxType { diff --git a/frontend/javascripts/types/api_types.ts b/frontend/javascripts/types/api_types.ts index 20f958467c8..f04bce8ad3c 100644 --- a/frontend/javascripts/types/api_types.ts +++ b/frontend/javascripts/types/api_types.ts @@ -787,11 +787,11 @@ export enum APIJobType { FIND_LARGEST_SEGMENT_ID = "find_largest_segment_id", INFER_NUCLEI = "infer_nuclei", INFER_NEURONS = "infer_neurons", + INFER_MITOCHONDRIA = "infer_mitochondria", + INFER_INSTANCES = "infer_instances", MATERIALIZE_VOLUME_ANNOTATION = "materialize_volume_annotation", TRAIN_NEURON_MODEL = "train_neuron_model", TRAIN_INSTANCE_MODEL = "train_instance_model", - INFER_MITOCHONDRIA = "infer_mitochondria", - INFER_INSTANCES = "infer_instances", // Only used for backwards compatibility, e.g. to display results. DEPRECATED_INFER_WITH_MODEL = "infer_with_model", DEPRECATED_TRAIN_MODEL = "train_model", diff --git a/frontend/javascripts/viewer/default_state.ts b/frontend/javascripts/viewer/default_state.ts index 165f954df70..548b3ddd081 100644 --- a/frontend/javascripts/viewer/default_state.ts +++ b/frontend/javascripts/viewer/default_state.ts @@ -246,7 +246,7 @@ const defaultState: WebknossosState = { showMergeAnnotationModal: false, showZarrPrivateLinksModal: false, showPythonClientModal: false, - aIJobModalState: "invisible", + aIJobDrawerState: "invisible", showRenderAnimationModal: false, showShareModal: false, storedLayouts: {}, diff --git a/frontend/javascripts/viewer/model/actions/ui_actions.ts b/frontend/javascripts/viewer/model/actions/ui_actions.ts index b71682cb42e..aedaf7eeb3e 100644 --- a/frontend/javascripts/viewer/model/actions/ui_actions.ts +++ b/frontend/javascripts/viewer/model/actions/ui_actions.ts @@ -1,7 +1,7 @@ import type { OrthoView, Vector3 } from "viewer/constants"; import type { AnnotationTool } from "viewer/model/accessors/tool_accessor"; import type { BorderOpenStatus, Theme, WebknossosState } from "viewer/store"; -import type { StartAIJobModalState } from "viewer/view/action-bar/ai_job_modals/constants"; +import type { StartAiJobDrawerState } from "viewer/view/ai_jobs/constants"; type SetDropzoneModalVisibilityAction = ReturnType; type SetVersionRestoreVisibilityAction = ReturnType; @@ -18,7 +18,7 @@ type SetShareModalVisibilityAction = ReturnType; type SetBusyBlockingInfoAction = ReturnType; type SetPythonClientModalVisibilityAction = ReturnType; -type SetAIJobModalStateAction = ReturnType; +type SetaIJobDrawerStateAction = ReturnType; export type EnterAction = ReturnType; export type EscapeAction = ReturnType; export type SetQuickSelectStateAction = ReturnType; @@ -53,7 +53,7 @@ export type UiAction = | SetDownloadModalVisibilityAction | SetPythonClientModalVisibilityAction | SetShareModalVisibilityAction - | SetAIJobModalStateAction + | SetaIJobDrawerStateAction | SetRenderAnimationModalVisibilityAction | SetMergeModalVisibilityAction | SetUserScriptsModalVisibilityAction @@ -133,9 +133,9 @@ export const setShareModalVisibilityAction = (visible: boolean) => type: "SET_SHARE_MODAL_VISIBILITY", 
visible, }) as const; -export const setAIJobModalStateAction = (state: StartAIJobModalState) => +export const setAIJobDrawerStateAction = (state: StartAiJobDrawerState) => ({ - type: "SET_AI_JOB_MODAL_STATE", + type: "SET_AI_JOB_DRAWER_STATE", state, }) as const; export const setRenderAnimationModalVisibilityAction = (visible: boolean) => diff --git a/frontend/javascripts/viewer/model/reducers/ui_reducer.ts b/frontend/javascripts/viewer/model/reducers/ui_reducer.ts index c5d4bee407f..268c90fac17 100644 --- a/frontend/javascripts/viewer/model/reducers/ui_reducer.ts +++ b/frontend/javascripts/viewer/model/reducers/ui_reducer.ts @@ -102,9 +102,9 @@ function UiReducer(state: WebknossosState, action: Action): WebknossosState { }); } - case "SET_AI_JOB_MODAL_STATE": { + case "SET_AI_JOB_DRAWER_STATE": { return updateKey(state, "uiInformation", { - aIJobModalState: action.state, + aIJobDrawerState: action.state, }); } diff --git a/frontend/javascripts/viewer/store.ts b/frontend/javascripts/viewer/store.ts index ef15f0a3a02..58f3ee856e8 100644 --- a/frontend/javascripts/viewer/store.ts +++ b/frontend/javascripts/viewer/store.ts @@ -82,7 +82,7 @@ import { eventEmitterMiddleware } from "./model/helpers/event_emitter_middleware import FlycamInfoCacheReducer from "./model/reducers/flycam_info_cache_reducer"; import OrganizationReducer from "./model/reducers/organization_reducer"; import ProofreadingReducer from "./model/reducers/proofreading_reducer"; -import type { StartAIJobModalState } from "./view/action-bar/ai_job_modals/constants"; +import type { StartAiJobDrawerState } from "./view/ai_jobs/constants"; export type { BoundingBoxObject } from "types/bounding_box"; @@ -496,7 +496,7 @@ type UiInformation = { readonly showMergeAnnotationModal: boolean; readonly showZarrPrivateLinksModal: boolean; readonly showAddScriptModal: boolean; - readonly aIJobModalState: StartAIJobModalState; + readonly aIJobDrawerState: StartAiJobDrawerState; readonly showRenderAnimationModal: boolean; readonly activeTool: AnnotationTool; readonly activeUserBoundingBoxId: number | null | undefined; diff --git a/frontend/javascripts/viewer/view/action-bar/ai_job_modals/components/annotations_csv_input.tsx b/frontend/javascripts/viewer/view/action-bar/ai_job_modals/components/annotations_csv_input.tsx deleted file mode 100644 index f837b8a015c..00000000000 --- a/frontend/javascripts/viewer/view/action-bar/ai_job_modals/components/annotations_csv_input.tsx +++ /dev/null @@ -1,162 +0,0 @@ -import { getAnnotationsForTask } from "admin/api/tasks"; -import { - getDataset, - getTracingForAnnotationType, - getUnversionedAnnotationInformation, -} from "admin/rest_api"; -import { Button, Form, Input } from "antd"; -import type { RuleObject } from "antd/es/form"; -import Toast from "libs/toast"; -import * as Utils from "libs/utils"; -import { useCallback, useState } from "react"; -import { type APIAnnotation, AnnotationLayerEnum, type ServerVolumeTracing } from "types/api_types"; -import type { Vector3 } from "viewer/constants"; -import { convertUserBoundingBoxesFromServerToFrontend } from "viewer/model/reducers/reducer_helpers"; -import { serverVolumeToClientVolumeTracing } from "viewer/model/reducers/volumetracing_reducer"; -import type { AnnotationInfoForAITrainingJob } from "../utils"; - -const { TextArea } = Input; - -export function AnnotationsCsvInput({ - onAdd, -}: { onAdd: (newItems: Array>) => void }) { - const [value, setValue] = useState(""); - const onClickAdd = useCallback(async () => { - const annotationIdsForTraining = 
[]; - const unfinishedTasks = []; - - const lines = value - .split("\n") - .map((line) => line.trim()) - .filter((line) => line !== ""); - for (const taskOrAnnotationIdOrUrl of lines) { - if (taskOrAnnotationIdOrUrl.includes("/")) { - const lastSegment = taskOrAnnotationIdOrUrl.split("/").at(-1); - if (lastSegment) { - annotationIdsForTraining.push(lastSegment); - } - } else { - let isTask = true; - try { - const annotations = await getAnnotationsForTask(taskOrAnnotationIdOrUrl, { - showErrorToast: false, - }); - const finishedAnnotations = annotations.filter(({ state }) => state === "Finished"); - if (finishedAnnotations.length > 0) { - annotationIdsForTraining.push(...finishedAnnotations.map(({ id }) => id)); - } else { - unfinishedTasks.push(taskOrAnnotationIdOrUrl); - } - } catch (_e) { - isTask = false; - } - if (!isTask) { - annotationIdsForTraining.push(taskOrAnnotationIdOrUrl); - } - } - } - - const newAnnotationsWithDatasets = await Promise.all( - annotationIdsForTraining.map(async (annotationId) => { - const annotation = await getUnversionedAnnotationInformation(annotationId); - const dataset = await getDataset(annotation.datasetId); - - const volumeServerTracings: ServerVolumeTracing[] = await Promise.all( - annotation.annotationLayers - .filter((layer) => layer.typ === "Volume") - .map( - (layer) => - getTracingForAnnotationType(annotation, layer) as Promise, - ), - ); - const volumeTracings = volumeServerTracings.map((tracing) => - serverVolumeToClientVolumeTracing(tracing, null, null), - ); - // A copy of the user bounding boxes of an annotation is saved in every tracing. In case no volume tracing exists, the skeleton tracing is checked. - let userBoundingBoxes = volumeTracings[0]?.userBoundingBoxes; - if (!userBoundingBoxes) { - const skeletonLayer = annotation.annotationLayers.find( - (layer) => layer.typ === AnnotationLayerEnum.Skeleton, - ); - if (skeletonLayer) { - const skeletonTracing = await getTracingForAnnotationType(annotation, skeletonLayer); - userBoundingBoxes = convertUserBoundingBoxesFromServerToFrontend( - skeletonTracing.userBoundingBoxes, - undefined, - ); - } else { - throw new Error( - `Annotation ${annotation.id} has neither a volume nor a skeleton layer`, - ); - } - } - if (annotation.task?.boundingBox) { - const largestId = Math.max(...userBoundingBoxes.map(({ id }) => id)); - userBoundingBoxes.push({ - name: "Task Bounding Box", - boundingBox: Utils.computeBoundingBoxFromBoundingBoxObject(annotation.task.boundingBox), - color: [0, 0, 0], - isVisible: true, - id: largestId + 1, - }); - } - const volumeTracingMags = volumeServerTracings.map(({ mags }) => - mags - ? mags.map((mag) => ({ mag: Utils.point3ToVector3(mag) })) - : [{ mag: [1, 1, 1] as Vector3 }], - ); - return { - annotation, - dataset, - volumeTracings, - volumeTracingMags, - userBoundingBoxes: userBoundingBoxes || [], - }; - }), - ); - if (unfinishedTasks.length > 0) { - Toast.warning( - `The following tasks have no finished annotations: ${unfinishedTasks.join(", ")}`, - ); - } - onAdd(newAnnotationsWithDatasets); - }, [value, onAdd]); - - const validator = useCallback((_rule: RuleObject, value: string) => { - const valid = value.split("\n").every((line) => !line.includes("#") && !line.includes(",")); - - return valid - ? Promise.resolve() - : Promise.reject( - new Error("Each line should only contain an annotation ID or URL (without # or ,)"), - ); - }, []); - - return ( -
- -
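Beyond the docs, the frontend part of this diff renames the AI job modal state to a drawer state (`aIJobModalState` → `aIJobDrawerState`, `SET_AI_JOB_MODAL_STATE` → `SET_AI_JOB_DRAWER_STATE`) and moves the related components from `view/action-bar/ai_job_modals` to `view/ai_jobs`. The following self-contained TypeScript sketch shows how the renamed action creator and reducer case fit together; only the `"invisible"` value is taken from `default_state.ts`, while the other state values and the simplified reducer shape are assumptions rather than the actual implementation in `ui_actions.ts`/`ui_reducer.ts`.

```typescript
// Simplified sketch of the renamed AI job drawer state handling.
// "invisible" appears in default_state.ts; the other values are assumed for illustration.
type StartAiJobDrawerState = "invisible" | "open_ai_inference" | "open_ai_training";

const setAIJobDrawerStateAction = (state: StartAiJobDrawerState) =>
  ({ type: "SET_AI_JOB_DRAWER_STATE", state }) as const;

type UiState = { aIJobDrawerState: StartAiJobDrawerState };

function uiReducer(
  uiState: UiState,
  action: ReturnType<typeof setAIJobDrawerStateAction>,
): UiState {
  switch (action.type) {
    case "SET_AI_JOB_DRAWER_STATE":
      // Analogous to the updateKey(state, "uiInformation", ...) call in ui_reducer.ts.
      return { ...uiState, aIJobDrawerState: action.state };
    default:
      return uiState;
  }
}

// Example: opening the drawer, e.g. from a toolbar click handler.
console.log(uiReducer({ aIJobDrawerState: "invisible" }, setAIJobDrawerStateAction("open_ai_inference")));
```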