Skip to content

MeanSquaredError

minnt.losses.MeanSquaredError

Bases: Loss

Mean squared error loss implementation.

Source code in minnt/losses/mean_squared_error.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
class MeanSquaredError(Loss):
    """Loss computing the mean squared error between predictions and targets."""

    def __init__(self, *, reduction: Reduction = "mean") -> None:
        """Construct the loss object, remembering the chosen reduction strategy.

        Parameters:
          reduction: The reduction method to apply to the computed loss.
        """

        self._reduction = reduction

    def __call__(
        self, y: torch.Tensor, y_true: torch.Tensor, sample_weights: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Compute the (optionally sample-weighted) mean squared error.

        Parameters:
          y: The predicted outputs. Their shape must match `y_true` exactly (no
            broadcasting), except that it may contain one extra singleton dimension.
          y_true: The ground-truth targets.
          sample_weights: Optional per-sample weights. When given, their shape must be
            broadcastable to a prefix of the shape of `y_true`, and every sample's loss
            is scaled by its weight.

        Returns:
          The computed loss: a scalar tensor for the `"mean"` and `"sum"` reductions,
            or a tensor shaped like `y_true` for the `"none"` reduction.
        """
        # Drop a single size-1 dimension from `y` when that makes the shapes agree.
        y = maybe_remove_one_singleton_dimension(y, y_true)
        assert y.shape == y_true.shape, f"Shapes of y {y.shape} and y_true {y_true.shape} have to match " \
            "up to one singleton dim in y."

        # Expand the weights up front so that `mse_loss` can apply them elementwise.
        weights = None if sample_weights is None else broadcast_to_prefix(sample_weights, y_true.shape)

        return mse_loss(y, y_true, reduction=self._reduction, weight=weights)

__init__

__init__(*, reduction: Reduction = 'mean') -> None

Create the MeanSquaredError object with the specified reduction method.

Parameters:

  • reduction (Reduction, default: 'mean' ) –

    The reduction method to apply to the computed loss.

Source code in minnt/losses/mean_squared_error.py
18
19
20
21
22
23
24
25
def __init__(self, *, reduction: Reduction = "mean") -> None:
    """Construct the loss object, remembering the chosen reduction strategy.

    Parameters:
      reduction: The reduction method to apply to the computed loss.
    """

    self._reduction = reduction

__call__

__call__(
    y: Tensor, y_true: Tensor, sample_weights: Tensor | None = None
) -> Tensor

Compute the mean squared error loss, optionally with sample weights.

Parameters:

  • y (Tensor) –

    The predicted outputs. Their shape either has to be exactly the same as y_true (no broadcasting), or can contain an additional single dimension of size 1.

  • y_true (Tensor) –

    The ground-truth targets.

  • sample_weights (Tensor | None, default: None ) –

    Optional sample weights. If provided, their shape must be broadcastable to a prefix of the shape of y_true, and the loss for each sample is weighted accordingly.

Returns:

  • Tensor

    A tensor representing the computed loss. A scalar tensor if reduction is "mean" or "sum"; otherwise (if reduction is "none"), a tensor of the same shape as y_true.

Source code in minnt/losses/mean_squared_error.py
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
def __call__(
    self, y: torch.Tensor, y_true: torch.Tensor, sample_weights: torch.Tensor | None = None,
) -> torch.Tensor:
    """Compute the (optionally sample-weighted) mean squared error.

    Parameters:
      y: The predicted outputs. Their shape must match `y_true` exactly (no
        broadcasting), except that it may contain one extra singleton dimension.
      y_true: The ground-truth targets.
      sample_weights: Optional per-sample weights. When given, their shape must be
        broadcastable to a prefix of the shape of `y_true`, and every sample's loss
        is scaled by its weight.

    Returns:
      The computed loss: a scalar tensor for the `"mean"` and `"sum"` reductions,
        or a tensor shaped like `y_true` for the `"none"` reduction.
    """
    # Drop a single size-1 dimension from `y` when that makes the shapes agree.
    y = maybe_remove_one_singleton_dimension(y, y_true)
    assert y.shape == y_true.shape, f"Shapes of y {y.shape} and y_true {y_true.shape} have to match " \
        "up to one singleton dim in y."

    # Expand the weights up front so that `mse_loss` can apply them elementwise.
    weights = None if sample_weights is None else broadcast_to_prefix(sample_weights, y_true.shape)

    return mse_loss(y, y_true, reduction=self._reduction, weight=weights)