Use power-of-two scaling in autoscale scaled translation ops rules.
As shown in issue #60, propagating non power-of-two scaling factors can decrease training accuracy in low precision (typically FP16): the additional rescaling operations introduce non-negligible accumulated floating point errors. This PR adds the option to round the scale to a power of two in scaled translations. Only rounding up and down are supported at the moment. The rounding mode can be modified in the config dataclass `AutoScaleConfig`.
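As a minimal sketch of how this option might be enabled: the field name `rounding_mode` and the import path below are assumptions for illustration, not taken from this diff; the commit message only states that the rounding mode lives on the `AutoScaleConfig` dataclass.

```python
# Hypothetical usage sketch: the `rounding_mode` field name and the import
# locations are assumptions, not confirmed by this commit.
from jax_scaled_arithmetics.core import AutoScaleConfig, Pow2RoundMode  # assumed exports

# Round propagated scale factors down to the closest power of two.
config = AutoScaleConfig(rounding_mode=Pow2RoundMode.DOWN)
```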
Showing 9 changed files with 189 additions and 12 deletions.
@@ -0,0 +1,65 @@
# Copyright (c) 2023 Graphcore Ltd. All rights reserved.
from enum import IntEnum
from typing import Any, Dict

import numpy as np
from numpy.typing import NDArray

from .typing import Array, get_numpy_api

# Exponent bits masking.
_exponent_bits_mask: Dict[Any, NDArray[Any]] = {
    np.dtype(np.float16): np.packbits(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], dtype=np.uint8)).view(
        np.int16
    ),
    np.dtype(np.float32): np.packbits(
        np.array(
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
            dtype=np.uint8,
        )
    ).view(np.int32),
    np.dtype(np.float64): np.array(np.inf, np.float64).view(np.int64),
}
"""Exponent bits masking: explicit bitmask to keep only the exponent bits in floating point values.
NOTE: should normally also correspond to the `np.inf` bit pattern for FP16 and FP32.
"""


class Pow2RoundMode(IntEnum):
    """Supported power-of-two rounding modes."""

    NONE = 0
    DOWN = 1
    UP = 2
    STOCHASTIC = 3


def pow2_round_down(val: Array) -> Array:
    """Round down to the closest power of 2."""
    np_api = get_numpy_api(val)
    exponent_mask = _exponent_bits_mask[val.dtype]
    intdtype = exponent_mask.dtype
    pow2_val = np_api.bitwise_and(val.view(intdtype), exponent_mask).view(val.dtype).reshape(val.shape)
    return pow2_val


def pow2_round_up(val: Array) -> Array:
    """Round up to the closest power of 2.
    NOTE: may overflow to inf.
    """
    # FIXME: rounding when already a power of 2.
    # Should do additional masking to check that.
    pow2_val = pow2_round_down(val) * np.array(2, dtype=val.dtype)
    return pow2_val


def pow2_round(val: Array, mode: Pow2RoundMode = Pow2RoundMode.DOWN) -> Array:
    """Power-of-two rounding."""
    if mode == Pow2RoundMode.NONE:
        return val
    elif mode == Pow2RoundMode.DOWN:
        return pow2_round_down(val)
    elif mode == Pow2RoundMode.UP:
        return pow2_round_up(val)
    raise NotImplementedError(f"Unsupported power-of-2 rounding mode '{mode}'.")
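A quick usage sketch of these helpers, with values taken from the unit tests below. The trick is that the exponent bitmask (for FP16, `0x7C00`, which is also the bit pattern of `+inf`) zeroes the sign and mantissa bits, leaving `2^exponent`, i.e. the largest power of two not exceeding the input. The import path matches the one used by the test file.

```python
import numpy as np
from jax_scaled_arithmetics.core import pow2_round_down, pow2_round_up

# Masking out mantissa (and sign) bits keeps 2^exponent: the largest power of two <= val.
print(pow2_round_down(np.float16(2.1)))    # 2.0
print(pow2_round_down(np.float16(0.3)))    # 0.25
# Rounding up is implemented as round-down followed by doubling.
print(pow2_round_up(np.float16(0.51)))     # 1.0
print(pow2_round_up(np.float16(17000)))    # 32768.0
```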
@@ -0,0 +1,42 @@
# Copyright (c) 2023 Graphcore Ltd. All rights reserved.
import chex
import numpy as np
import numpy.testing as npt
from absl.testing import parameterized

from jax_scaled_arithmetics.core import pow2_round_down, pow2_round_up
from jax_scaled_arithmetics.core.utils import _exponent_bits_mask


class Pow2RoundingUtilTests(chex.TestCase):
    @parameterized.parameters(
        {"dtype": np.float16},
        {"dtype": np.float32},
    )
    def test__exponent_bitmask__inf_value(self, dtype):
        val = _exponent_bits_mask[np.dtype(dtype)].view(dtype)
        expected_val = dtype(np.inf)
        npt.assert_equal(val, expected_val)

    @parameterized.product(
        val_exp=[(1, 1), (2.1, 2), (0.3, 0.25), (0.51, 0.5), (65500, 32768)],
        dtype=[np.float16, np.float32, np.float64],
    )
    def test__pow2_round_down__proper_rounding__multi_dtypes(self, val_exp, dtype):
        val, exp = dtype(val_exp[0]), dtype(val_exp[1])
        pow2_val = pow2_round_down(val)
        assert pow2_val.dtype == val.dtype
        assert pow2_val.shape == ()
        assert type(pow2_val) in {type(val), np.ndarray}
        npt.assert_equal(pow2_val, exp)

    @parameterized.product(
        val_exp=[(2.1, 4), (0.3, 0.5), (0.51, 1), (17000, 32768)],
        dtype=[np.float16],
    )
    def test__pow2_round_up__proper_rounding__multi_dtypes(self, val_exp, dtype):
        val, exp = dtype(val_exp[0]), dtype(val_exp[1])
        pow2_val = pow2_round_up(val)
        assert pow2_val.dtype == val.dtype
        assert type(pow2_val) in {type(val), np.ndarray}
        npt.assert_equal(pow2_val, exp)
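As a small illustration of the overflow caveat noted in the `pow2_round_up` docstring (a sketch under the assumption that the helpers are importable as in the tests above): near the FP16 maximum of 65504, doubling the rounded-down power of two exceeds the representable range.

```python
import numpy as np
from jax_scaled_arithmetics.core import pow2_round_up

# 65500 rounds down to 32768; doubling gives 65536, which overflows FP16 to inf.
print(pow2_round_up(np.float16(65500)))  # inf
```

Relatedly, per the FIXME in `pow2_round_up`, inputs that are already exact powers of two are still doubled, e.g. `pow2_round_up(np.float16(2.0))` returns 4.0 rather than 2.0.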