@@ -64,7 +64,7 @@
 
 class EddyMotionGPR(GaussianProcessRegressor):
     r"""
-    A GP regressor specialized for eddymotion.
+    A Gaussian process (GP) regressor specialized for eddymotion.
 
     This specialization of the default GP regressor is created to allow
     the following extended behaviors:
@@ -80,22 +80,21 @@ class EddyMotionGPR(GaussianProcessRegressor):
 
     In principle, Scikit-Learn's implementation normalizes the training data
     as in [Andersson15]_ (see
-    `FSL's souce code <https://git.fmrib.ox.ac.uk/fsl/eddy/-/blob/2480dda293d4cec83014454db3a193b87921f6b0/DiffusionGP.cpp#L218>`__).
+    `FSL's source code <https://git.fmrib.ox.ac.uk/fsl/eddy/-/blob/2480dda293d4cec83014454db3a193b87921f6b0/DiffusionGP.cpp#L218>`__).
     From their paper (p. 167, end of first column):
 
-        Typically one just substracts the mean (:math:`\bar{\mathbf{f}}`)
+        Typically one just subtracts the mean (:math:`\bar{\mathbf{f}}`)
         from :math:`\mathbf{f}` and then add it back to
         :math:`f^{*}`, which is analogous to what is often done in
         "traditional" regression.
 
     Finally, the parameter :math:`\sigma^2` maps on to Scikit-learn's ``alpha``
-    of the regressor.
-    Because it is not a parameter of the kernel, hyperparameter selection
-    through gradient-descent with analytical gradient calculations
-    would not work (the derivative of the kernel w.r.t. alpha is zero).
+    of the regressor. Because it is not a parameter of the kernel, hyperparameter
+    selection through gradient-descent with analytical gradient calculations
+    would not work (the derivative of the kernel w.r.t. ``alpha`` is zero).
 
-    I believe this is overlooked in [Andersson15]_, or they actually did not
-    use analytical gradient-descent:
+    This might have been overlooked in [Andersson15]_, or else they actually did
+    not use analytical gradient-descent:
 
         *A note on optimisation*
 
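Because ``alpha`` is invisible to the kernel's analytical gradient, one option consistent with the note above is an outer search over ``alpha`` on the log-marginal likelihood, keeping scikit-learn's gradient-based fit for the kernel hyperparameters. A minimal sketch with stock scikit-learn and a stand-in RBF kernel (the grid, toy data, and kernel choice are illustrative only, not what eddymotion ships); ``normalize_y=True`` performs the mean-subtract-and-add-back described in the quoted passage:

```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

rng = np.random.default_rng(0)
X, y = rng.normal(size=(30, 3)), rng.normal(size=30)  # toy data

models = [
    # The inner fit still optimizes the kernel hyperparameters with the
    # analytical gradient; only ``alpha`` is swept externally.
    GaussianProcessRegressor(kernel=RBF(), alpha=a, normalize_y=True).fit(X, y)
    for a in np.logspace(-4, 0, 9)
]
best = max(models, key=lambda gpr: gpr.log_marginal_likelihood())
print(f"selected alpha={best.alpha:.2g}")
```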
@@ -266,7 +265,6 @@ def __init__(
         l_bounds: tuple[float, float] = BOUNDS_LAMBDA,
     ):
         r"""
-        Initialize an exponential Kriging kernel.
 
         Parameters
         ----------
@@ -275,7 +273,7 @@ def __init__(
         beta_l : :obj:`float`, optional
             The :math:`\lambda` hyperparameter.
         a_bounds : :obj:`tuple`, optional
-            Bounds for the a parameter.
+            Bounds for the ``a`` parameter.
         l_bounds : :obj:`tuple`, optional
             Bounds for the :math:`\lambda` hyperparameter.
 
@@ -290,7 +288,7 @@ def hyperparameter_a(self) -> Hyperparameter:
         return Hyperparameter("beta_a", "numeric", self.a_bounds)
 
     @property
-    def hyperparameter_beta_l(self) -> Hyperparameter:
+    def hyperparameter_l(self) -> Hyperparameter:
         return Hyperparameter("beta_l", "numeric", self.l_bounds)
 
     def __call__(
@@ -312,10 +310,10 @@ def __call__(
 
         Returns
         -------
-        K : ndarray of shape (n_samples_X, n_samples_Y)
+        K : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_Y)
             Kernel k(X, Y)
 
-        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
+        K_gradient : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_X, n_dims),\
             optional
             The gradient of the kernel k(X, X) with respect to the log of the
             hyperparameter of the kernel. Only returned when `eval_gradient`
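Together with the constructor parameters documented above, this ``__call__`` follows scikit-learn's ``Kernel`` protocol. A hypothetical usage sketch; the class name ``ExponentialKriging`` and the import path are inferred from context and not shown in these hunks:

```python
import numpy as np
from eddymotion.model.gpr import ExponentialKriging  # assumed path

bvecs = np.eye(3)  # three orthogonal unit gradient directions as toy inputs

kernel = ExponentialKriging(beta_a=1.0, beta_l=1.0)

K = kernel(bvecs)                              # (3, 3) covariance matrix
K, K_grad = kernel(bvecs, eval_gradient=True)  # plus (3, 3, 2) gradient w.r.t.
                                               # log(beta_a) and log(beta_l)
```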
@@ -343,12 +341,12 @@ def diag(self, X: np.ndarray) -> np.ndarray:
 
         Parameters
         ----------
-        X : ndarray of shape (n_samples_X, n_features)
+        X : :obj:`~numpy.ndarray` of shape (n_samples_X, n_features)
             Left argument of the returned kernel k(X, Y)
 
         Returns
         -------
-        K_diag : ndarray of shape (n_samples_X,)
+        K_diag : :obj:`~numpy.ndarray` of shape (n_samples_X,)
             Diagonal of kernel k(X, X)
         """
         return self.beta_l * np.ones(X.shape[0])
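``diag`` returning ``self.beta_l * np.ones(X.shape[0])`` is what an exponential covariance yields at zero angle. A self-contained sketch of that covariance over unit gradient directions, following the exponential model in [Andersson15]_ (an illustration, not this module's implementation):

```python
import numpy as np

def exponential_covariance(X, Y, beta_a=1.0, beta_l=1.0):
    # theta: angle between gradient directions, made antipodally symmetric
    # because b and -b encode the same diffusion measurement.
    cos_theta = np.clip(np.abs(X @ Y.T), 0.0, 1.0)
    theta = np.arccos(cos_theta)
    return beta_l * np.exp(-theta / beta_a)

# Consistent with ``diag`` above: at theta == 0 the covariance is beta_l.
bvecs = np.eye(3)
assert np.allclose(np.diag(exponential_covariance(bvecs, bvecs)), 1.0)
```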
@@ -372,7 +370,6 @@ def __init__(
         l_bounds: tuple[float, float] = BOUNDS_LAMBDA,
     ):
         r"""
-        Initialize a spherical Kriging kernel.
 
         Parameters
         ----------
@@ -396,7 +393,7 @@ def hyperparameter_a(self) -> Hyperparameter:
         return Hyperparameter("beta_a", "numeric", self.a_bounds)
 
     @property
-    def hyperparameter_beta_l(self) -> Hyperparameter:
+    def hyperparameter_l(self) -> Hyperparameter:
         return Hyperparameter("beta_l", "numeric", self.l_bounds)
 
     def __call__(
@@ -418,10 +415,10 @@ def __call__(
 
         Returns
         -------
-        K : ndarray of shape (n_samples_X, n_samples_Y)
+        K : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_Y)
             Kernel k(X, Y)
 
-        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
+        K_gradient : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_X, n_dims),\
             optional
             The gradient of the kernel k(X, X) with respect to the log of the
             hyperparameter of the kernel. Only returned when ``eval_gradient``
@@ -454,12 +451,12 @@ def diag(self, X: np.ndarray) -> np.ndarray:
 
         Parameters
         ----------
-        X : ndarray of shape (n_samples_X, n_features)
+        X : :obj:`~numpy.ndarray` of shape (n_samples_X, n_features)
             Left argument of the returned kernel k(X, Y)
 
         Returns
         -------
-        K_diag : ndarray of shape (n_samples_X,)
+        K_diag : :obj:`~numpy.ndarray` of shape (n_samples_X,)
             Diagonal of kernel k(X, X)
         """
         return self.beta_l * np.ones(X.shape[0])
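For symmetry, a sketch of the spherical model implied by the second kernel's ``diag``, again following the spherical covariance family in [Andersson15]_ rather than this module's code; it likewise equals ``beta_l`` at zero angle:

```python
import numpy as np

def spherical_covariance(X, Y, beta_a=1.0, beta_l=1.0):
    # k(theta) = beta_l * (1 - 3*theta/(2*a) + theta**3/(2*a**3)) for
    # theta <= a, and 0 beyond the angular range a.
    theta = np.arccos(np.clip(np.abs(X @ Y.T), 0.0, 1.0))
    k = beta_l * (1.0 - 1.5 * theta / beta_a + 0.5 * (theta / beta_a) ** 3)
    return np.where(theta <= beta_a, k, 0.0)

# At theta == 0 this is beta_l, matching ``diag`` above.
assert np.allclose(np.diag(spherical_covariance(np.eye(3), np.eye(3))), 1.0)
```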