Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bug/992 precision loss #993

Open
wants to merge 14 commits into
base: main
Choose a base branch
from
31 changes: 23 additions & 8 deletions heat/core/factories.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,10 +138,12 @@ def arange(
# compose the local tensor
start += offset * step
stop = start + lshape[0] * step
data = torch.arange(start, stop, step, device=device.torch_device)

htype = types.canonical_heat_type(dtype)
data = data.type(htype.torch_type())
if types.issubdtype(htype, types.floating):
data = torch.arange(start, stop, step, dtype=htype.torch_type(), device=device.torch_device)
else:
data = torch.arange(start, stop, step, device=device.torch_device)
data = data.type(htype.torch_type())

return DNDarray(data, gshape, htype, split, device, comm, balanced)

Expand Down Expand Up @@ -301,8 +303,11 @@ def array(
obj = obj.larray

# sanitize the data type
if dtype is not None:
if dtype is None:
torch_dtype = None
else:
dtype = types.canonical_heat_type(dtype)
torch_dtype = dtype.torch_type()

# sanitize device
if device is not None:
Expand All @@ -318,6 +323,7 @@ def array(
try:
obj = torch.tensor(
obj,
dtype=torch_dtype,
device=device.torch_device
if device is not None
else devices.get_device().torch_device,
Expand All @@ -341,6 +347,7 @@ def array(
try:
obj = torch.as_tensor(
obj,
dtype=torch_dtype,
device=device.torch_device
if device is not None
else devices.get_device().torch_device,
Expand All @@ -352,7 +359,6 @@ def array(
if dtype is None:
dtype = types.canonical_heat_type(obj.dtype)
else:
torch_dtype = dtype.torch_type()
if obj.dtype != torch_dtype:
obj = obj.type(torch_dtype)

Expand Down Expand Up @@ -1145,9 +1151,18 @@ def linspace(
# compose the local tensor
start += offset * step
stop = start + lshape[0] * step - step
data = torch.linspace(start, stop, lshape[0], device=device.torch_device)
if dtype is not None:
data = data.type(types.canonical_heat_type(dtype).torch_type())
if dtype is not None and types.issubdtype(dtype, types.floating):
data = torch.linspace(
start,
stop,
lshape[0],
dtype=types.canonical_heat_type(dtype).torch_type(),
device=device.torch_device,
)
else:
data = torch.linspace(start, stop, lshape[0], device=device.torch_device)
if dtype is not None:
data = data.type(types.canonical_heat_type(dtype).torch_type())

# construct the resulting global tensor
ht_tensor = DNDarray(
Expand Down
2 changes: 2 additions & 0 deletions heat/core/linalg/basics.py
Original file line number Diff line number Diff line change
Expand Up @@ -1192,6 +1192,8 @@ def matrix_norm(

row_axis, col_axis = axis

# dtype = types.promote_types(x.dtype, types.float32)

if ord == 1:
if col_axis > row_axis and not keepdims:
col_axis -= 1
Expand Down
8 changes: 6 additions & 2 deletions heat/core/rounding.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ def abs(
if dtype is not None and not issubclass(dtype, dtype):
raise TypeError("dtype must be a heat data type")

absolute_values = _operations.__local_op(torch.abs, x, out)
absolute_values = _operations.__local_op(torch.abs, x, out, no_cast=True)
if dtype is not None:
absolute_values.larray = absolute_values.larray.type(dtype.torch_type())
absolute_values._DNDarray__dtype = dtype
Expand Down Expand Up @@ -181,7 +181,11 @@ def fabs(x: DNDarray, out: Optional[DNDarray] = None) -> DNDarray:
If not provided or ``None``, a freshly-allocated array is returned.

"""
return abs(x, out, dtype=None)
if isinstance(x, DNDarray):
dtype = types.promote_types(x.dtype, types.float32)
else:
dtype = types.float32
return abs(x, out, dtype=dtype)


DNDarray.fabs: Callable[[DNDarray, Optional[DNDarray]], DNDarray] = lambda self, out=None: fabs(
Expand Down
7 changes: 7 additions & 0 deletions heat/core/tests/test_factories.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,9 @@ def test_arange(self):
# make an indirect check for the sequence, compare against the Gaussian sum
self.assertEqual(three_arg_arange_dtype_float64.sum(axis=0, keepdims=True), 20.0)

check_precision = ht.arange(16777217.0, 16777218, 1, dtype=ht.float64)
self.assertEqual(check_precision.sum(), 16777217)

# exceptions
with self.assertRaises(ValueError):
ht.arange(-5, 3, split=1)
Expand Down Expand Up @@ -142,6 +145,8 @@ def test_array(self):
== torch.tensor(tuple_data, dtype=torch.int8, device=self.device.torch_device)
).all()
)
check_precision = ht.array(16777217.0, dtype=ht.float64)
self.assertEqual(check_precision.sum(), 16777217)

# basic array function, unsplit data, no copy
torch_tensor = torch.tensor([6, 5, 4, 3, 2, 1], device=self.device.torch_device)
Expand Down Expand Up @@ -664,6 +669,8 @@ def test_linspace(self):

zero_samples = ht.linspace(-3, 5, num=0)
self.assertEqual(zero_samples.size, 0)
check_precision = ht.linspace(0.0, 16777217.0, num=2, dtype=torch.float64)
self.assertEqual(check_precision.sum(), 16777217)

# simple inverse linear space
descending = ht.linspace(-5, 3, num=100)
Expand Down
4 changes: 4 additions & 0 deletions heat/core/tests/test_rounding.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,10 @@ def test_abs(self):
self.assertEqual(absolute_values.sum(axis=0), 100)
self.assertEqual(absolute_values.dtype, ht.float32)
self.assertEqual(absolute_values.larray.dtype, torch.float32)
check_precision = ht.asarray(9007199254740993, dtype=ht.int64)
precision_absolute_values = ht.abs(check_precision, dtype=ht.int64)
self.assertEqual(precision_absolute_values.sum(), check_precision.sum())
self.assertEqual(precision_absolute_values.dtype, check_precision.dtype)
# for fabs
self.assertEqual(int8_absolute_values_fabs.dtype, ht.float32)
self.assertEqual(int16_absolute_values_fabs.dtype, ht.float32)
Expand Down
Loading