#17829: Binary ng survey test
mouliraj-mcw committed Feb 14, 2025
1 parent 941b34c commit ebb919e
Showing 12 changed files with 1,807 additions and 0 deletions.
10 changes: 10 additions & 0 deletions .github/workflows/ttnn-run-sweeps.yaml
@@ -357,14 +357,17 @@ on:
- eltwise.binary.div_no_nan.div_no_nan
- eltwise.binary.logical_or.logical_or_
- eltwise.binary.logical_or.logical_or
- eltwise.binary.logical_or.logical_or_survey
- eltwise.binary.logical_or.logical_or_output
- eltwise.binary.logical_or.logical_or_forge
- eltwise.binary.logical_or.logical_or_sharded
- eltwise.binary.logical_xor.logical_xor_
- eltwise.binary.logical_xor.logical_xor
- eltwise.binary.logical_xor.logical_xor_survey
- eltwise.binary.logical_xor.logical_xor_sharded
- eltwise.binary.logical_and.logical_and_
- eltwise.binary.logical_and.logical_and
- eltwise.binary.logical_and.logical_and_survey
- eltwise.binary.logical_and.logical_and_output
- eltwise.binary.logical_and.logical_and_forge
- eltwise.binary.logical_and.logical_and_sharded
@@ -384,11 +387,15 @@ on:
- eltwise.binary.bcast.bcast_h_sharded
- eltwise.binary.bcast.bcast
- eltwise.binary.eq.eq_scalar_pytorch2
- eltwise.binary.eq.eq_survey
- eltwise.binary.eq.eq_forge
- eltwise.binary.ge.ge_forge
- eltwise.binary.gt.gt_scalar_pytorch2
- eltwise.binary.gt.gt_forge
- eltwise.binary.ge.ge_survey
- eltwise.binary.gt.gt_survey
- eltwise.binary.le.le_tensor_pytorch2
- eltwise.binary.le.le_survey
- eltwise.binary.fmod.fmod
- eltwise.binary.fmod.fmod_unary
- eltwise.binary.fmod.fmod_unary_sharded
@@ -399,11 +406,14 @@ on:
- eltwise.binary.logaddexp2.logaddexp2
- eltwise.binary.logaddexp2.logaddexp2_sharded
- eltwise.binary.ldexp.ldexp
- eltwise.binary.ldexp.ldexp_survey
- eltwise.binary.ldexp.ldexp_sharded
- eltwise.binary.lt.lt_tensor_pytorch2
- eltwise.binary.lt.lt_survey
- eltwise.binary.lt.lt_scalar_pytorch2
- eltwise.binary.lt.lt_forge
- eltwise.binary.ne.ne_scalar_pytorch2
- eltwise.binary.ne.ne_survey
- eltwise.binary.ne.ne_forge
- eltwise.binary.isclose.isclose
- eltwise.binary.isclose.isclose_sharded
178 changes: 178 additions & 0 deletions tests/sweep_framework/sweeps/eltwise/binary/eq/eq_survey.py
@@ -0,0 +1,178 @@
# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

from typing import Optional, Tuple
from functools import partial

import torch
import ttnn
from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt

from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
from models.utility_functions import torch_random

# Parameters provided to the test vector generator are defined here.
# They are defined as dict-type suites that map the run function's arguments (keys) to lists of possible input values.
# Each suite has a key name (in this case "lte_survey-4") that associates the generated test vectors with this specific suite of inputs.
# Developers can create their own generator functions and pass them to the parameters as inputs; a commented sketch follows the suite below.
parameters = {
"lte_survey-4": {
"input_shape": [{"self": [1, 1, 512, 512], "other": [1, 1, 512, 512]}],
"input_a_dtype": [ttnn.bfloat4_b],
"input_b_dtype": [ttnn.bfloat4_b],
"input_a_layout": [ttnn.TILE_LAYOUT],
"input_b_layout": [ttnn.TILE_LAYOUT],
"input_memory_config": [
{"a_mem_config": "l1_interleaved", "b_mem_config": "l1_interleaved"},
{"a_mem_config": "dram_interleaved", "b_mem_config": "l1_interleaved"},
{"a_mem_config": "l1_interleaved", "b_mem_config": "dram_interleaved"},
{"a_mem_config": "dram_interleaved", "b_mem_config": "dram_interleaved"},
{"a_mem_config": "dram_interleaved", "b_mem_config": "l1_height_sharded_rm"},
{"a_mem_config": "dram_interleaved", "b_mem_config": "l1_width_sharded_rm"},
{"a_mem_config": "dram_interleaved", "b_mem_config": "l1_block_sharded_rm"},
{"a_mem_config": "dram_interleaved", "b_mem_config": "l1_height_sharded_cm"},
{"a_mem_config": "dram_interleaved", "b_mem_config": "l1_width_sharded_cm"},
{"a_mem_config": "dram_interleaved", "b_mem_config": "l1_block_sharded_cm"},
{"a_mem_config": "l1_height_sharded_rm", "b_mem_config": "dram_interleaved"},
{"a_mem_config": "l1_width_sharded_rm", "b_mem_config": "dram_interleaved"},
{"a_mem_config": "l1_block_sharded_rm", "b_mem_config": "dram_interleaved"},
{"a_mem_config": "l1_height_sharded_cm", "b_mem_config": "dram_interleaved"},
{"a_mem_config": "l1_width_sharded_cm", "b_mem_config": "dram_interleaved"},
{"a_mem_config": "l1_block_sharded_cm", "b_mem_config": "dram_interleaved"},
{"a_mem_config": "l1_height_sharded_rm", "b_mem_config": "l1_height_sharded_rm"},
{"a_mem_config": "l1_height_sharded_cm", "b_mem_config": "l1_height_sharded_cm"},
{"a_mem_config": "l1_width_sharded_rm", "b_mem_config": "l1_width_sharded_rm"},
{"a_mem_config": "l1_width_sharded_cm", "b_mem_config": "l1_width_sharded_cm"},
{"a_mem_config": "l1_block_sharded_rm", "b_mem_config": "l1_block_sharded_rm"},
{"a_mem_config": "l1_block_sharded_cm", "b_mem_config": "l1_block_sharded_cm"},
],
},
}
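
# A hypothetical alternative (illustration only, not used by this sweep): rather than
# literal lists, a developer could build the shape suite with a small generator helper.
#
# def gen_square_shape_pairs(sizes=(256, 512)):
#     return [{"self": [1, 1, s, s], "other": [1, 1, s, s]} for s in sizes]
#
# parameters["lte_survey-4"]["input_shape"] = gen_square_shape_pairs()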


def return_mem_config(mem_config_string):
if mem_config_string == "l1_interleaved":
return ttnn.L1_MEMORY_CONFIG
elif mem_config_string == "dram_interleaved":
return ttnn.DRAM_MEMORY_CONFIG
elif mem_config_string == "l1_height_sharded_rm":
return ttnn.create_sharded_memory_config(
shape=(512 // 8, 512),
core_grid=ttnn.CoreGrid(y=2, x=4),
strategy=ttnn.ShardStrategy.HEIGHT,
orientation=ttnn.ShardOrientation.ROW_MAJOR,
use_height_and_width_as_shard_shape=True,
)
elif mem_config_string == "l1_width_sharded_rm":
return ttnn.create_sharded_memory_config(
shape=(512, 512 // 8),
core_grid=ttnn.CoreGrid(y=2, x=4),
strategy=ttnn.ShardStrategy.WIDTH,
orientation=ttnn.ShardOrientation.ROW_MAJOR,
use_height_and_width_as_shard_shape=True,
)
elif mem_config_string == "l1_block_sharded_rm":
return ttnn.create_sharded_memory_config(
shape=(512 // 2, 512 // 4),
core_grid=ttnn.CoreGrid(y=2, x=4),
strategy=ttnn.ShardStrategy.BLOCK,
orientation=ttnn.ShardOrientation.ROW_MAJOR,
use_height_and_width_as_shard_shape=True,
)
elif mem_config_string == "l1_height_sharded_cm":
return ttnn.create_sharded_memory_config(
shape=(512, 512 // 8),
core_grid=ttnn.CoreGrid(y=2, x=4),
strategy=ttnn.ShardStrategy.HEIGHT,
orientation=ttnn.ShardOrientation.COL_MAJOR,
use_height_and_width_as_shard_shape=True,
)
elif mem_config_string == "l1_width_sharded_cm":
return ttnn.create_sharded_memory_config(
shape=(512 // 8, 512),
core_grid=ttnn.CoreGrid(y=2, x=4),
strategy=ttnn.ShardStrategy.WIDTH,
orientation=ttnn.ShardOrientation.COL_MAJOR,
use_height_and_width_as_shard_shape=True,
)
elif mem_config_string == "l1_block_sharded_cm":
return ttnn.create_sharded_memory_config(
shape=(512 // 2, 512 // 4),
core_grid=ttnn.CoreGrid(y=2, x=4),
strategy=ttnn.ShardStrategy.BLOCK,
orientation=ttnn.ShardOrientation.COL_MAJOR,
use_height_and_width_as_shard_shape=True,
)
raise ("Input mem_config_string is not valid!")
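
# Shard-shape arithmetic for the 512x512 inputs on the 2x4 (8-core) grid used above:
#   row-major height sharding: (512 // 8, 512) = (64, 512) per core
#   row-major width sharding:  (512, 512 // 8) = (512, 64) per core
#   block sharding:            (512 // 2, 512 // 4) = (256, 128) per core
# The column-major height/width variants pass the transposed shard shape with
# ShardOrientation.COL_MAJOR; use_height_and_width_as_shard_shape=True means the
# shape given here is taken directly as the per-core shard shape.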


# This is the run function for the test, defined by the developer.
# The run function must take the above-defined parameters as inputs.
# The runner will call this run function with each test vector, and the results returned from this function will be stored.
# If you defined a device_mesh_fixture above, the object you yielded will be passed into this function as 'device'. Otherwise, it will be the default ttnn device opened by the infra.
def run(
input_shape,
input_a_dtype,
input_b_dtype,
input_a_layout,
input_b_layout,
input_memory_config,
*,
device,
) -> list:
torch.manual_seed(0)

input_a_memory_config = input_memory_config["a_mem_config"]
input_b_memory_config = input_memory_config["b_mem_config"]

torch_input_tensor_a = gen_func_with_cast_tt(
partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype
)(input_shape["self"])

if isinstance(input_shape["other"], list):
torch_input_tensor_b = gen_func_with_cast_tt(
partial(torch_random, low=-105, high=105, dtype=torch.float32), input_b_dtype
)(input_shape["other"])
else:
torch_input_tensor_b = torch.tensor(input_shape["other"], dtype=torch.float32)

input_tensor_a = ttnn.from_torch(
torch_input_tensor_a,
dtype=input_a_dtype,
layout=input_a_layout,
device=device,
memory_config=return_mem_config(input_a_memory_config),
)

input_tensor_b = ttnn.from_torch(
torch_input_tensor_b,
dtype=input_b_dtype,
layout=input_b_layout,
device=device,
memory_config=return_mem_config(input_b_memory_config),
)

    # Block-float formats (bfloat8_b / bfloat4_b) are lossy; read the quantized values
    # back from the device tensors so the golden comparison uses the same data the op sees.
    if input_a_dtype in (ttnn.bfloat8_b, ttnn.bfloat4_b):
        torch_input_tensor_a = ttnn.to_torch(input_tensor_a)

    if input_b_dtype in (ttnn.bfloat8_b, ttnn.bfloat4_b):
        torch_input_tensor_b = ttnn.to_torch(input_tensor_b)

golden_function = ttnn.get_golden_function(ttnn.le)
torch_output_tensor = golden_function(torch_input_tensor_a, torch_input_tensor_b)
    start_time = start_measuring_time()
    result = ttnn.experimental.lte(input_tensor_a, input_tensor_b)
    output_tensor = ttnn.to_torch(result)
    e2e_perf = stop_measuring_time(start_time)

    # Debug output for the survey, printed after the timing stops so it does not inflate e2e_perf.
    torch.set_printoptions(linewidth=400, threshold=10000, precision=5, sci_mode=False, edgeitems=17)
    print("torch_output_tensor", torch_output_tensor)
    print("output_tensor", output_tensor)

    return [check_with_pcc(torch_output_tensor, output_tensor, pcc=0.999), e2e_perf]
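

# Minimal standalone driver, offered as a sketch only: it assumes a single locally
# attached device (id 0) and exercises one hand-picked vector from the suite above
# without going through the sweep framework's runner. The device id and the chosen
# vector are illustrative assumptions, not part of the sweep itself.
if __name__ == "__main__":
    device = ttnn.open_device(device_id=0)
    try:
        pcc_result, e2e_perf = run(
            input_shape={"self": [1, 1, 512, 512], "other": [1, 1, 512, 512]},
            input_a_dtype=ttnn.bfloat4_b,
            input_b_dtype=ttnn.bfloat4_b,
            input_a_layout=ttnn.TILE_LAYOUT,
            input_b_layout=ttnn.TILE_LAYOUT,
            input_memory_config={"a_mem_config": "l1_interleaved", "b_mem_config": "l1_interleaved"},
            device=device,
        )
        print("pcc:", pcc_result, "e2e_perf:", e2e_perf)
    finally:
        ttnn.close_device(device)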