NoDecay

minnt.schedulers.NoDecay

Bases: GenericDecay

A non-decaying learning rate scheduler with optional linear warmup.

This scheduler is a convenience wrapper around minnt.schedulers.GenericDecay with the decay parameter set to "none" and total_steps made optional.
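
For example, a minimal usage sketch (the model, loss, and training loop below are illustrative placeholders rather than part of minnt; only the NoDecay import follows the path documented above):

import torch

from minnt.schedulers import NoDecay

model = torch.nn.Linear(16, 4)  # placeholder model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

# Warm the learning rate up linearly over 500 steps, then hold it constant.
scheduler = NoDecay(optimizer, warmup=500)

for step in range(2000):
    optimizer.zero_grad()
    loss = model(torch.randn(8, 16)).sum()  # placeholder loss
    loss.backward()
    optimizer.step()
    scheduler.step()  # advance the schedule once per optimizer step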

Source code in minnt/schedulers/no_decay.py
class NoDecay(GenericDecay):
    """A non-decaying learning rate scheduler with optional linear warmup.

    This scheduler is a convenience wrapper around [minnt.schedulers.GenericDecay][]
    with the `decay` parameter set to `"none"` and `total_steps` made optional.
    """
    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        total_steps: int | None = None,
        *,
        warmup: int | float = 0,
        last_epoch: int = -1,
    ) -> None:
        """Creates a new NoDecay scheduler instance.

        Parameters:
          optimizer: The optimizer for which to schedule the learning rate.
          total_steps: An optional number of training steps. Useful only when `warmup` is
            specified as a fraction.
          warmup: Specifies the warmup phase. If a number smaller than 1 is given,
            it is treated as a fraction of `total_steps`; otherwise, it is treated as
            an absolute number of steps. Default is 0 (no warmup).
          last_epoch: The index of the last epoch when resuming training. Default is -1.
        """
        if total_steps is None:
            if 0 < warmup < 1:
                raise ValueError("If total_steps is None, warmup must be zero or an absolute number of steps.")
            total_steps = warmup

        super().__init__(
            optimizer,
            total_steps,
            decay="none",
            warmup=warmup,
            last_epoch=last_epoch,
            warn_about_exceeding_steps=False,
        )

__init__

__init__(
    optimizer: Optimizer,
    total_steps: int | None = None,
    *,
    warmup: int | float = 0,
    last_epoch: int = -1
) -> None

Creates a new NoDecay scheduler instance.

Parameters:

  • optimizer (Optimizer) –

    The optimizer for which to schedule the learning rate.

  • total_steps (int | None, default: None) –

    An optional number of training steps. Useful only when warmup is specified as a fraction.

  • warmup (int | float, default: 0) –

    Specifies the warmup phase. If a number smaller than 1 is given, it is treated as a fraction of total_steps; otherwise, it is treated as an absolute number of steps. Default is 0 (no warmup).

  • last_epoch (int, default: -1) –

    The index of the last epoch when resuming training. Default is -1.
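
In practice, the two warmup modes look like this (a brief sketch, assuming an optimizer constructed as in the example above):

NoDecay(optimizer, warmup=500)                     # absolute: 500 warmup steps
NoDecay(optimizer, total_steps=10000, warmup=0.1)  # fraction: 0.1 * 10000 = 1000 warmup steps
# NoDecay(optimizer, warmup=0.1)                   # ValueError: a fractional warmup requires total_steps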

Source code in minnt/schedulers/no_decay.py
def __init__(
    self,
    optimizer: torch.optim.Optimizer,
    total_steps: int | None = None,
    *,
    warmup: int | float = 0,
    last_epoch: int = -1,
) -> None:
    """Creates a new NoDecay scheduler instance.

    Parameters:
      optimizer: The optimizer for which to schedule the learning rate.
      total_steps: An optional number of training steps. Useful only when `warmup` is
        specified as a fraction.
      warmup: Specifies the warmup phase. If a number smaller than 1 is given,
        it is treated as a fraction of `total_steps`; otherwise, it is treated as
        an absolute number of steps. Default is 0 (no warmup).
      last_epoch: The index of the last epoch when resuming training. Default is -1.
    """
    if total_steps is None:
        if 0 < warmup < 1:
            raise ValueError("If total_steps is None, warmup must be zero or an absolute number of steps.")
        total_steps = warmup

    super().__init__(
        optimizer,
        total_steps,
        decay="none",
        warmup=warmup,
        last_epoch=last_epoch,
        warn_about_exceeding_steps=False,
    )