diff --git a/docs/diffcrash/run.md b/docs/diffcrash/run.md
index 878c10f..98a0dcf 100644
--- a/docs/diffcrash/run.md
+++ b/docs/diffcrash/run.md
@@ -7,7 +7,7 @@ analysis of simulation runs. It can be used as a set of independent
 executables or as a postprocessor plugin. [Diffcrash] itself must be
 licensed. Please therefore contact [Sidact] directly.
 
-This commmand line utility makes running a Diffcrash analysis much
+This command line utility makes running a Diffcrash analysis much
 easier.
 
 [diffcrash]: https://www.sidact.de/diffcrash
diff --git a/docs/dimred/command_line_interface.md b/docs/dimred/command_line_interface.md
index a682674..7a7b4c9 100644
--- a/docs/dimred/command_line_interface.md
+++ b/docs/dimred/command_line_interface.md
@@ -209,7 +209,7 @@ optional arguments:
                         Arguments for clustering algorithms. If not set,
                         clustering will be skipped.
   --outlier-args [OUTLIER_ARGS [OUTLIER_ARGS ...]]
-                        Arguments for outlier detection befor clustering.
+                        Arguments for outlier detection before clustering.
 ```
 
 Following arguments are required for the analysis:
@@ -322,8 +322,8 @@ through:
 Next, we will take a look at the `--start-stage` and `--end-stage` arguments.
 These allow to restart and end the command-line utility at certain points in
 the process.
-This is usefull if you don't want to repeat certain stages to save time, or want
-to end the process prematurly, e.g. don't want to generate the `.html` output.
+This is useful if you don't want to repeat certain stages to save time, or want
+to end the process prematurely, e.g. don't want to generate the `.html` output.
 
 To set the desired start and end stage, use the the following keywords:
 
@@ -431,7 +431,7 @@ ref_sample = h5file["subsample"][:]
 np.stack(h5file["subsampled_runs"][entry][:]
     for entry in h5file["subsampled_runs"].keys()])
 
-# create a numpy array containg the right reduced order basis for projection:
+# create a numpy array containing the right reduced order basis for projection:
 v_rob = h5file["v_rob"][:]
 
 # the subsampled runs are projected into the right reduced order basis and called betas:
diff --git a/docs/dyna/performance_info.md b/docs/dyna/performance_info.md
index c0da833..151b868 100644
--- a/docs/dyna/performance_info.md
+++ b/docs/dyna/performance_info.md
@@ -70,4 +70,4 @@ Since different results may be dumped at different frequencies
 some state folders might contain more information than others.
 This inherently prevents efficient memory reading.
 The python version used here is slower than the original C-based version but
-one therefore gains better portability accross operating systems.
+one therefore gains better portability across operating systems.
diff --git a/docs/femzip/index.md b/docs/femzip/index.md
index cb297d0..0288bae 100644
--- a/docs/femzip/index.md
+++ b/docs/femzip/index.md
@@ -19,7 +19,7 @@ functions such as a femzip file check if desired.
 ```
 
 Here we check if we can use the extended FEMZIP-API.
-The extended FEMZIP-API allows reading selected arrays, but reqires a license
+The extended FEMZIP-API allows reading selected arrays, but requires a license
 with the feature "FEMUNZIPLIB-DYNA", which can be attained from SIDACT or
 femzip distributors.
 
@@ -37,7 +37,7 @@ True
 ```
 
 Check the file and library version.
-This is usually not neccessary.
+This is usually not necessary.
 
 ``` python
 >>> api.is_femunzip_version_ok(filepath)
diff --git a/lasso/diffcrash/diffcrash_run.py b/lasso/diffcrash/diffcrash_run.py
index f9c5046..1c61a18 100644
--- a/lasso/diffcrash/diffcrash_run.py
+++ b/lasso/diffcrash/diffcrash_run.py
@@ -1141,7 +1141,7 @@ def _create_merge_input_file(self, directory: str) -> str:
         merge_input_file.write("eigen_all ! Name of eigen input file\n")
         merge_input_file.write(
             "mode_ ! Name of Output file "
-            + "(string will be apended with mode information)\n"
+            + "(string will be appended with mode information)\n"
         )
         merge_input_file.write("1 1 ! Mode number to be generated\n")
         merge_input_file.write("'d+ d-' ! Mode type to be generated\n")
diff --git a/lasso/dimred/dimred_run.py b/lasso/dimred/dimred_run.py
index 5d2f773..b3f1849 100644
--- a/lasso/dimred/dimred_run.py
+++ b/lasso/dimred/dimred_run.py
@@ -659,7 +659,7 @@ def _parse_html_name(self, html_name_string: str) -> str:
         if replace_count > 0:
             msg = (
                 f"Replaced {replace_count} invalid characters for the html file name. "
-                f"The new hmtl name is: {html_name}"
+                f"The new html name is: {html_name}"
             )
             self.log(msg)
 
diff --git a/lasso/dimred/graph_laplacian.py b/lasso/dimred/graph_laplacian.py
index 8ca22da..286aefc 100644
--- a/lasso/dimred/graph_laplacian.py
+++ b/lasso/dimred/graph_laplacian.py
@@ -126,7 +126,7 @@ def _laplacian(lapl: csgraph, n_eigenmodes: int = 5):
     n_eigenmodes : int
         number of eigenmodes to compute
     points : np.ndarray
-        coordintes of graph nodes (only for plotting)
+        coordinates of graph nodes (only for plotting)
 
     Returns
     -------
diff --git a/lasso/dimred/sphere/algorithms.py b/lasso/dimred/sphere/algorithms.py
index 29ce986..d76ac2d 100644
--- a/lasso/dimred/sphere/algorithms.py
+++ b/lasso/dimred/sphere/algorithms.py
@@ -182,7 +182,7 @@ def compute_similarity(embeddings: np.ndarray) -> np.ndarray:
     return smatrix
 
 
-def create_historgram(
+def create_histogram(
     cloud: np.ndarray, sphere_axis: str = "Z", planar: bool = False
 ) -> BinnedStatistic2dResult:
     """Builds a histogram using the blocks of a sphered globe and returns a
diff --git a/lasso/dimred/svd/clustering_betas.py b/lasso/dimred/svd/clustering_betas.py
index 8cae9c5..8f84caf 100644
--- a/lasso/dimred/svd/clustering_betas.py
+++ b/lasso/dimred/svd/clustering_betas.py
@@ -25,7 +25,7 @@ def __apply_spectral_clustering(betas, runids, datasets, idsets, random_state=11
     idsets: list
         List where the grouped ids corresponding to the grouped Betas will be saved
     **kwargs: keyword arguments
-        Keyword arguments specific for the SpectralClustering algorythm
+        Keyword arguments specific for the SpectralClustering algorithm
 
     See Also
     --------
@@ -62,7 +62,7 @@ def __apply_k_means(betas, runids, datasets, idsets, random_state=11, **kwargs):
     idsets: list
         List where the grouped ids corresponding to the grouped Betas will be saved
     **kwargs: keyword arguments
-        Keyword arguments specific fot the KMeans algorythm
+        Keyword arguments specific for the KMeans algorithm
 
     See Also
     --------
@@ -85,7 +85,7 @@ def __apply_k_means(betas, runids, datasets, idsets, random_state=11, **kwargs):
 def __apply_dbscan(betas, runids, datasets, idsets, **kwargs):
     """
     Method to group the input Betas.
-    Defautl keyword arguments: eps=0.08
+    Default keyword arguments: eps=0.08
 
     Parameters
     ----------
@@ -98,7 +98,7 @@ def __apply_dbscan(betas, runids, datasets, idsets, **kwargs):
     idsets: list
         List where the grouped ids corresponding to the grouped Betas will be saved
     **kwags: keyword arguments
-        Keyword arguments for the DBSCAN algorythm
+        Keyword arguments for the DBSCAN algorithm
 
     See Also
     --------
@@ -158,7 +158,7 @@ def __detect_outliers_isolation_forest(
     betas, ids, beta_clusters, id_clusters, random_state=11, **kwargs
 ):
     """
-    Detects outliers based on the IsolationForest algorythm from sklearn.
+    Detects outliers based on the IsolationForest algorithm from sklearn.
     Detected outliers will be appended into the provided lists
     Default keyword parameters: random_state=12, behaviour="new", contamination=0.005
@@ -173,7 +173,7 @@ def __detect_outliers_isolation_forest(
     id_clusters: list
         List where each cluster of ids will be appended
     **kwargs: keyword argument
-        Keywords specific to the IsolationForest algorythm
+        Keywords specific to the IsolationForest algorithm
     Returns
     -------
     inlier_betas: np.array
@@ -198,7 +198,7 @@ def __detect_outliers_isolation_forest(
 
 
 def __detect_outliers_local_outlier_factor(betas, ids, beta_clusters, id_clusters, **kwargs):
     """
-    Detects outliers based on the LocalOutlierFactor algorythm from sklearn.
+    Detects outliers based on the LocalOutlierFactor algorithm from sklearn.
     Detected outliers will be appended into the provided lists
     Default keyword parameters: contamination=0.01
@@ -213,7 +213,7 @@ def __detect_outliers_local_outlier_factor(betas, ids, beta_clusters, id_cluster
     id_clusters: list
         List where each cluster of ids will be appended
     **kwargs: keyword argument
-        Keywords specific to the LocalOutlierFactor algorythm.
+        Keywords specific to the LocalOutlierFactor algorithm.
     Returns
     -------
     inlier_betas: np.ndarray
@@ -238,9 +238,9 @@ def __detect_outliers_local_outlier_factor(betas, ids, beta_clusters, id_cluster
 
 def __detect_outliers_one_class_svm(betas, ids, beta_clusters, id_clusters, **kwargs):
     """
-    Detects outliers based on the OneClassSVM algorythm from sklearn.
+    Detects outliers based on the OneClassSVM algorithm from sklearn.
     Detected outliers will be appended into the provided lists
-    Defautl keyword arguments: gamma=0.1, nu=0.01
+    Default keyword arguments: gamma=0.1, nu=0.01
 
     Parameters
     ----------
@@ -253,7 +253,7 @@ def __detect_outliers_one_class_svm(betas, ids, beta_clusters, id_clusters, **kw
     id_clusters: list
         List where each cluster of ids will be appended
     **kwargs: keyword argument
-        Keywords specific to the OneClassSVM algorythm.
+        Keywords specific to the OneClassSVM algorithm.
 
     Returns
     -------
@@ -280,7 +280,7 @@ def __detect_outliers_one_class_svm(betas, ids, beta_clusters, id_clusters, **kw
 
 def __experimental_outlier_detector(betas, ids, **kwargs):
     """
-    Detects outliers by applying LocalOutlierFactor algorythm from sklearn over multiple slices of betas .
+    Detects outliers by applying LocalOutlierFactor algorithm from sklearn over multiple slices of betas .
     Detected outliers will be appended into the provided lists
     Default keyword arguments: contamination=0.01
     Parameters
@@ -290,7 +290,7 @@ def __experimental_outlier_detector(betas, ids, **kwargs):
     ids: np.ndarray
         Numpy array containing the ids of each beta
     **kwargs: keyword argument
-        Keywords specific to the LocalOutlierFactor algorythm
+        Keywords specific to the LocalOutlierFactor algorithm
     Returns
     -------
     outliers: np.array
@@ -583,9 +583,9 @@ def group_betas(
 ) -> Union[Tuple[list, list], str]:
     """
     Base function to to group betas into groups, detect outliers. Provides that all different
-    clustering and outlier detection algorythms are implemented in an easy to access environment.
+    clustering and outlier detection algorithms are implemented in an easy to access environment.
     To select different clustering and outlier detection algoyrthms, please use appropriate
-    KeywordTypes. A description of each function can be accessed with document_algorythm(keyword)
+    KeywordTypes. A description of each function can be accessed with document_algorithm(keyword)
     A list of all functions can be accessed with list_detectors_and_clusters()
     Parameters
     ----------
@@ -595,25 +595,25 @@ def group_betas(
     betas: np.ndarray
         Numpy array containing the betas. Betas are expected to be of shape
         (samples, timestep, 3)
-        The three entries per beta can either be dimesnions (x,y,z) or any three betas/eigenvalues
+        The three entries per beta can either be dimensions (x,y,z) or any three betas/eigenvalues
     cluster: str, optional, default : "KMeans".
-        String specifying which clustering algorythm shall be applied.
+        String specifying which clustering algorithm shall be applied.
         Use ClusterTypefor easier access
     detector: str, optional, default: None.
-        String specifying which outlier detection algorythm shall be applied.
+        String specifying which outlier detection algorithm shall be applied.
         Use DetectorType for easier access
     cluster_params: dict, optional
-        Dictionary containing parameters for the clustering algorythms.
+        Dictionary containing parameters for the clustering algorithms.
         See the sklearn documentation for the function to learn more.
     detector_params: dict, optional
-        Dictionary containing parameters for the outlier detection algorythms.
+        Dictionary containing parameters for the outlier detection algorithms.
         See the sklearn documentation for the function to learn more
 
     Returns
     -------
     beta_clusters: list
         List containing Numpy Arrays of betas in one cluster.
-        If a detector was selected, or the clustering algorythm has its
+        If a detector was selected, or the clustering algorithm has its
         own outlier detection, the first entry in the list will be oultier betas
     id_clusters: list
         List containing lists of beta ids.
         Each id corresponds to the beta in
@@ -626,13 +626,13 @@ def group_betas(
     document_algorithm:
         Prints docstring of each function into console
     list_detectors_and_clusters:
-        Prints out all detection and clustering algorythms into console
+        Prints out all detection and clustering algorithms into console
     Sklearn Userguide chapter 2.3 Clustering:
         https://scikit-learn.org/stable/modules/clustering.html
-        Detailed overview of different clustering algorythms
+        Detailed overview of different clustering algorithms
     Sklearn Examples outlier detection:
         https://scikit-learn.org/stable/auto_examples/plot_anomaly_comparison.html
-        Example of different used outlier detection algorythms
+        Example of different used outlier detection algorithms
     """
 
     # pylint: disable = too-many-arguments, too-many-locals, too-many-branches
@@ -659,9 +659,9 @@ def group_betas(
             betas = betas[inlier_index]
             beta_index = beta_index[inlier_index]
         else:
-            empy_list = []
-            beta_clusters.append(empy_list)
-            id_clusters.append(empy_list)
+            empty_list = []
+            beta_clusters.append(empty_list)
+            id_clusters.append(empty_list)
 
     detector = None
 
diff --git a/lasso/dimred/svd/html_str_eles.py b/lasso/dimred/svd/html_str_eles.py
index 99e462c..00dc666 100644
--- a/lasso/dimred/svd/html_str_eles.py
+++ b/lasso/dimred/svd/html_str_eles.py
@@ -4,7 +4,7 @@
@@ -243,13 +243,13 @@ hideAll()
 
 document.getElementById(div).setAttribute("class", "inputSelected")
 document.getElementById(aClicked).setAttribute("onclick", `hideInputField('${div}', '${aClicked}')`)
-document.getElementById(aClicked).style.background = "var(--menue-option-active)"
+document.getElementById(aClicked).style.background = "var(--menu-option-active)"
 }
 
 function hideInputField(div, aClicked){
 document.getElementById(div).setAttribute("class", "inputHidden")
 document.getElementById(aClicked).setAttribute("onclick", `showInputField('${div}', '${aClicked}')`)
-document.getElementById(aClicked).style.background = "var(--menue-option-background)"
+document.getElementById(aClicked).style.background = "var(--menu-option-background)"
 }
 
 function hideAll(){
@@ -265,7 +265,7 @@
 document.getElementById("borderSliderBtn").setAttribute("onclick", "showInputField('borderSliderDiv','borderSliderBtn')")
 Array.from(document.getElementById("mySidenav").children).forEach(node => {
 if(node.childElementCount > 0){
-node.children[0].style.background = "var(--menue-option-background)"
+node.children[0].style.background = "var(--menu-option-background)"
 }
 })
 }
diff --git a/lasso/dimred/svd/keyword_types.py b/lasso/dimred/svd/keyword_types.py
index cc6d95c..0043487 100644
--- a/lasso/dimred/svd/keyword_types.py
+++ b/lasso/dimred/svd/keyword_types.py
@@ -33,7 +33,7 @@ def get_cluster_type_name() -> typing.List[str]:
 
 
 class DetectorType:
-    """Specifies names of different outlier detector algorythms
+    """Specifies names of different outlier detector algorithms
 
     Attributes
     ----------
diff --git a/lasso/dimred/svd/plot_beta_clusters.py b/lasso/dimred/svd/plot_beta_clusters.py
index 565e7f0..804d63d 100644
--- a/lasso/dimred/svd/plot_beta_clusters.py
+++ b/lasso/dimred/svd/plot_beta_clusters.py
@@ -68,7 +68,7 @@ def plot_clusters_js(
         Set to True if name of visualization shall contain time of creation.
         If set to False, visualization will override previous file
     filename: str, default "3d_beta_plot"
-        Name of .hmtl file
+        Name of .html file
     write: bool, default: True
         Set to False to not write .html file and return as string instead
     show_res: bool, default: True
@@ -77,7 +77,7 @@ def plot_clusters_js(
     Returns
     -------
     html_str_formatted: str
-        If **write=False** returns .hmtl file as string, else None
+        If **write=False** returns .html file as string, else None
     """
 
     # pylint: disable = too-many-arguments, too-many-locals
@@ -138,7 +138,7 @@ def plot_clusters_js(
         else:
             name = "cluster {i}".format(i=index)
         color = colorlist[(index - 1) % 10]
-        formated_trace = TRACE_STRING.format(
+        formatted_trace = TRACE_STRING.format(
             _traceNr_="trace{i}".format(i=index),
             _name_=name,
             _color_=color,
@@ -148,7 +148,7 @@ def plot_clusters_js(
             _z_=np.around(cluster[:, 2], decimals=5).tolist(),
         )
         tracelist.append(f"trace{index}")
-        html_str_formatted += formated_trace
+        html_str_formatted += formatted_trace
     trace_list_string = " traceList = ["
     for trace in tracelist:
         trace_list_string += trace + ", "
diff --git a/lasso/dimred/svd/pod_functions.py b/lasso/dimred/svd/pod_functions.py
index d6055d3..1abb000 100644
--- a/lasso/dimred/svd/pod_functions.py
+++ b/lasso/dimred/svd/pod_functions.py
@@ -16,7 +16,7 @@ def svd_step_and_dim(s_mat: np.ndarray, k=10) -> np.ndarray:
     ----------
     s_mat: ndarray
         2D array on which the svds operation shall be performed
-    k: int, 10, optinal.
+    k: int, 10, optional.
         The size of the POD
 
     Returns
diff --git a/lasso/dimred/svd/subsampling_methods.py b/lasso/dimred/svd/subsampling_methods.py
index 6f0e403..ee3d988 100644
--- a/lasso/dimred/svd/subsampling_methods.py
+++ b/lasso/dimred/svd/subsampling_methods.py
@@ -261,7 +261,7 @@ def remap_random_subsample(
     t_load: float
         Time required to load D3plot
     err_msg: str
-        If an error occured, a string is returned instead containing the error
+        If an error occurred, a string is returned instead containing the error
     """
     t_null = time.time()
     try:
diff --git a/lasso/dimred/svd/test_clustering_betas.py b/lasso/dimred/svd/test_clustering_betas.py
index 0f95fe1..1b76dc7 100644
--- a/lasso/dimred/svd/test_clustering_betas.py
+++ b/lasso/dimred/svd/test_clustering_betas.py
@@ -43,7 +43,7 @@ def test_group_betas(self):
                 index = np.where(fake_names == entry)[0]
                 self.assertTrue((fake_betas[index] - beta_clusters[c][e]).max() == 0)
 
-        # verify differen keyword kombinations
+        # verify different keyword combinations
         for cluster_type in ClusterType.get_cluster_type_name():
             for detector_type in DetectorType.get_detector_type_name():
 
diff --git a/lasso/dimred/svd/test_plot_betas_clusters.py b/lasso/dimred/svd/test_plot_betas_clusters.py
index e7edbe6..443784f 100644
--- a/lasso/dimred/svd/test_plot_betas_clusters.py
+++ b/lasso/dimred/svd/test_plot_betas_clusters.py
@@ -28,5 +28,5 @@ def test_plot_clusters_js(self):
 
         if isinstance(html_page_str, str):
             html_page_hash = hash_str(html_page_str)
-            desired_hash = "0af9f14ded18d4ac479ac2a07d46eb773ae93a61245e57e650115cc51522c54f"
+            desired_hash = "53f32e658079dfe8b9f24d7b8ff05a1d253abab77185203e408bfd942c837eeb"
             self.assertEqual(html_page_hash, desired_hash)
diff --git a/lasso/dimred/svd/test_subsampling_methods.py b/lasso/dimred/svd/test_subsampling_methods.py
index 84ceaa8..7ecb2f7 100644
--- a/lasso/dimred/svd/test_subsampling_methods.py
+++ b/lasso/dimred/svd/test_subsampling_methods.py
@@ -38,7 +38,7 @@ def test_create_reference_sample(self):
         self.assertEqual(ref_sample.shape, (n_nodes, 3))
 
         # should return string error message if desired samplesize is greater
-        # than avaiable nodes
+        # than available nodes
         n_nodes = 5500
         result = create_reference_subsample(load_path, parts=[], nr_samples=n_nodes)
 
@@ -90,7 +90,7 @@ def test_remap_random_subsample(self):
         # with real plots we check if the difference is 0
         self.assertTrue((ref_sample - subsample[0]).max() == 0)
 
-        # should return string error message for nonexistant parts:
+        # should return string error message for nonexistent parts:
         err_msg = remap_random_subsample(sample_path, parts=[1], reference_subsample=ref_sample)
         self.assertTrue(isinstance(err_msg, str))
 
diff --git a/lasso/dimred/test_dimred_run.py b/lasso/dimred/test_dimred_run.py
index 2520ca0..adba6fb 100644
--- a/lasso/dimred/test_dimred_run.py
+++ b/lasso/dimred/test_dimred_run.py
@@ -88,7 +88,7 @@ def test_run(self):
         # shape of v_rob must be (eigen, timesteps, nodes)
         self.assertEqual(test_v_rob.shape, (10, 5, 2000 * 3))
 
-        # verify that calculated betas are reproducable as expected
+        # verify that calculated betas are reproducible as expected
         # first, create displ mat containing difference in displ over time
         verify_displ_stacked = test_subs.reshape(49, 5, 2000 * 3)
         verify_diff_mat = np.stack(
@@ -103,7 +103,7 @@ def test_run(self):
         # recalculate displ
         recalc_displ_stacked = np.einsum("stk, ktn -> stn", test_betas, test_v_rob)
 
-        # Due to projection into eigenspace and back not using all avaiable eigenvectors,
+        # Due to projection into eigenspace and back not using all available eigenvectors,
         # a small error margin is inevitable
         self.assertTrue((verify_displ_stacked - recalc_displ_stacked).max() <= 1e-5)
 
diff --git a/lasso/dimred/test_plot_creator.py b/lasso/dimred/test_plot_creator.py
index 1dc1735..9f43fc4 100644
--- a/lasso/dimred/test_plot_creator.py
+++ b/lasso/dimred/test_plot_creator.py
@@ -72,7 +72,7 @@ def create_fake_d3plots(
     )
 
     # we could create an artificial array element_shell_is_alive to test the
-    # correct part extraction process not neccessary currently
+    # correct part extraction process not necessary currently
 
     os.makedirs(path, exist_ok=True)
     plot.write_d3plot(os.path.join(path, "plot"))
diff --git a/lasso/dyna/binout.py b/lasso/dyna/binout.py
index 361e445..9c6710d 100644
--- a/lasso/dyna/binout.py
+++ b/lasso/dyna/binout.py
@@ -65,7 +65,7 @@ def __init__(self, filepath: str):
 
         self.filelist = glob.glob(filepath)
 
-        # check file existance
+        # check file existence
         if not self.filelist:
             raise IOError("No file was found.")
 
diff --git a/lasso/dyna/d3plot.py b/lasso/dyna/d3plot.py
index 44a19b2..c672730 100644
--- a/lasso/dyna/d3plot.py
+++ b/lasso/dyna/d3plot.py
@@ -318,7 +318,7 @@ def build_header(self):
         ):
             new_header["ndim"] = 9
         else:
-            raise RuntimeError("Cannot determine haeder variable ndim.")
+            raise RuntimeError("Cannot determine header variable ndim.")
 
         # NUMNP
         new_header["numnp"] = (
@@ -3632,7 +3632,7 @@ def _read_states_transfer_memory(
                 n_states_buffer_array = buffer_array.shape[0]
                 array[i_state : i_state + n_states_buffer_array] = buffer_array
             else:
-                # remove unnecesary state arrays (not geometry arrays!)
+                # remove unnecessary state arrays (not geometry arrays!)
# we "could" deal with this in the allocate function # by not allocating them but this would replicate code # in the reading functions @@ -4604,7 +4604,7 @@ def _read_states_beams(self, state_data: np.ndarray, var_index: int, array_dict: # usual beam vars # pylint: disable = invalid-name N_BEAM_BASIC_VARS = 6 - # beam intergration point vars + # beam integration point vars # pylint: disable = invalid-name N_BEAM_IP_VARS = 5 @@ -5697,7 +5697,7 @@ def _collect_file_infos(self, size_per_state: int) -> List[MemoryInfo]: State data is expected directly behind geometry data Unfortunately data is spread across multiple files. One file could contain geometry and state data but states - may also be littered accross several files. This would + may also be littered across several files. This would not be an issue, if dyna would not always write in blocks of 512 words of memory, leaving zero byte padding blocks at the end of files. These need to be removed and/or taken @@ -5948,7 +5948,7 @@ def _read_state_bytebuffer(self, size_per_state: int): State data is expected directly behind geometry data Unfortunately data is spread across multiple files. One file could contain geometry and state data but states - may also be littered accross several files. This would + may also be littered across several files. This would not be an issue, if dyna would not always write in blocks of 512 words of memory, leaving zero byte padding blocks at the end of files. These need to be removed and/or taken diff --git a/lasso/dyna/d3plot_header.py b/lasso/dyna/d3plot_header.py index 4be19f3..061027b 100644 --- a/lasso/dyna/d3plot_header.py +++ b/lasso/dyna/d3plot_header.py @@ -568,7 +568,7 @@ def load_file(self, file: Union[str, BinaryBuffer]) -> "D3plotHeader": Notes ----- This routine only loads the minimal amount of data - that is neccessary. Thus it is safe to use on huge files. + that is necessary. Thus it is safe to use on huge files. Examples -------- diff --git a/lasso/femzip/femzip_api.py b/lasso/femzip/femzip_api.py index 9a992d2..a3bc7fb 100644 --- a/lasso/femzip/femzip_api.py +++ b/lasso/femzip/femzip_api.py @@ -159,7 +159,7 @@ class FemzipBufferInfo(Structure): Size of the post region of which I currently don't know anymore what it was. size_titles: c_uint64 - Size of the titles region behind the geomtry. + Size of the titles region behind the geometry. """ _fields_ = [