Skip to content

Commit

Permalink
auto format by CI
Browse files Browse the repository at this point in the history
  • Loading branch information
oneflow-ci-bot committed Apr 14, 2022
1 parent 537f005 commit 572f333
Show file tree
Hide file tree
Showing 20 changed files with 482 additions and 96 deletions.
20 changes: 17 additions & 3 deletions benchmark/test_alexnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,29 +2,43 @@
import oneflow_benchmark
from flowvision.models.alexnet import alexnet


@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_alexnet_batch_size1(benchmark, net=alexnet, input_shape=[1, 3, 224, 224]):
    """Benchmark one training step of AlexNet at batch size 1."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_alexnet_batch_size2(benchmark, net=alexnet, input_shape=[2, 3, 224, 224]):
    """Benchmark one training step of AlexNet at batch size 2 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_alexnet_batch_size4(benchmark, net=alexnet, input_shape=[4, 3, 224, 224]):
    """Benchmark one training step of AlexNet at batch size 4 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_alexnet_batch_size8(benchmark, net=alexnet, input_shape=[8, 3, 224, 224]):
    """Benchmark one training step of AlexNet at batch size 8 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)


@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_alexnet_batch_size16(benchmark, net=alexnet, input_shape=[16, 3, 224, 224]):
model, x, optimizer = fetch_args(net, input_shape)
Expand Down
18 changes: 15 additions & 3 deletions benchmark/test_convnext.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,23 +10,35 @@ def test_convnext_tiny_224_batch_size1(
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_convnext_tiny_224_batch_size2(benchmark, net=convnext_tiny_224, input_shape=[2, 3, 224, 224]):
    """Benchmark one training step of ConvNeXt-Tiny (224px) at batch size 2 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_convnext_tiny_224_batch_size4(benchmark, net=convnext_tiny_224, input_shape=[4, 3, 224, 224]):
    """Benchmark one training step of ConvNeXt-Tiny (224px) at batch size 4 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(
os.getenv("ONEFLOW_BENCHMARK_ALL") == "1",
"set ONEFLOW_BENCHMARK_ALL=1 to run this test",
)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_convnext_tiny_224_batch_size8(
benchmark, net=convnext_tiny_224, input_shape=[8, 3, 224, 224]
Expand Down
18 changes: 15 additions & 3 deletions benchmark/test_crossformer.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,23 +10,35 @@ def test_crossformer_tiny_patch4_group7_224_batch_size1(
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_crossformer_tiny_patch4_group7_224_batch_size2(benchmark, net=crossformer_tiny_patch4_group7_224, input_shape=[2, 3, 224, 224]):
    """Benchmark one training step of CrossFormer-Tiny at batch size 2 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_crossformer_tiny_patch4_group7_224_batch_size4(benchmark, net=crossformer_tiny_patch4_group7_224, input_shape=[4, 3, 224, 224]):
    """Benchmark one training step of CrossFormer-Tiny at batch size 4 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(
os.getenv("ONEFLOW_BENCHMARK_ALL") == "1",
"set ONEFLOW_BENCHMARK_ALL=1 to run this test",
)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_crossformer_tiny_patch4_group7_224_batch_size8(
benchmark, net=crossformer_tiny_patch4_group7_224, input_shape=[8, 3, 224, 224]
Expand Down
18 changes: 15 additions & 3 deletions benchmark/test_cswin.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,23 +10,35 @@ def test_cswin_tiny_224_batch_size1(
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_cswin_tiny_224_batch_size2(benchmark, net=cswin_tiny_224, input_shape=[2, 3, 224, 224]):
    """Benchmark one training step of CSWin-Tiny (224px) at batch size 2 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_cswin_tiny_224_batch_size4(benchmark, net=cswin_tiny_224, input_shape=[4, 3, 224, 224]):
    """Benchmark one training step of CSWin-Tiny (224px) at batch size 4 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(
os.getenv("ONEFLOW_BENCHMARK_ALL") == "1",
"set ONEFLOW_BENCHMARK_ALL=1 to run this test",
)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_cswin_tiny_224_batch_size8(
benchmark, net=cswin_tiny_224, input_shape=[8, 3, 224, 224]
Expand Down
18 changes: 15 additions & 3 deletions benchmark/test_densenet.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,23 +10,35 @@ def test_densenet121_batch_size1(
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_densenet121_batch_size2(benchmark, net=densenet121, input_shape=[2, 3, 224, 224]):
    """Benchmark one training step of DenseNet-121 at batch size 2 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_densenet121_batch_size4(benchmark, net=densenet121, input_shape=[4, 3, 224, 224]):
    """Benchmark one training step of DenseNet-121 at batch size 4 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(
os.getenv("ONEFLOW_BENCHMARK_ALL") == "1",
"set ONEFLOW_BENCHMARK_ALL=1 to run this test",
)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_densenet121_batch_size8(
benchmark, net=densenet121, input_shape=[8, 3, 224, 224]
Expand Down
18 changes: 15 additions & 3 deletions benchmark/test_ghostnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,19 +8,31 @@ def test_ghostnet_batch_size1(benchmark, net=ghostnet, input_shape=[1, 3, 224, 2
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_ghostnet_batch_size2(benchmark, net=ghostnet, input_shape=[2, 3, 224, 224]):
    """Benchmark one training step of GhostNet at batch size 2 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_ghostnet_batch_size4(benchmark, net=ghostnet, input_shape=[4, 3, 224, 224]):
    """Benchmark one training step of GhostNet at batch size 4 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(
os.getenv("ONEFLOW_BENCHMARK_ALL") == "1",
"set ONEFLOW_BENCHMARK_ALL=1 to run this test",
)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_ghostnet_batch_size8(benchmark, net=ghostnet, input_shape=[8, 3, 224, 224]):
model, x, optimizer = fetch_args(net, input_shape)
Expand Down
18 changes: 15 additions & 3 deletions benchmark/test_inception.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,23 +10,35 @@ def test_inception_v3_batch_size1(
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_inception_v3_batch_size2(benchmark, net=inception_v3, input_shape=[2, 3, 299, 299]):
    """Benchmark one training step of Inception-v3 (299px input) at batch size 2 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_inception_v3_batch_size4(benchmark, net=inception_v3, input_shape=[4, 3, 299, 299]):
    """Benchmark one training step of Inception-v3 (299px input) at batch size 4 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(
os.getenv("ONEFLOW_BENCHMARK_ALL") == "1",
"set ONEFLOW_BENCHMARK_ALL=1 to run this test",
)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_inception_v3_batch_size8(
benchmark, net=inception_v3, input_shape=[8, 3, 299, 299]
Expand Down
18 changes: 15 additions & 3 deletions benchmark/test_mlp_mixer.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,23 +10,35 @@ def test_mlp_mixer_b16_224_batch_size1(
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_mlp_mixer_b16_224_batch_size2(benchmark, net=mlp_mixer_b16_224, input_shape=[2, 3, 224, 224]):
    """Benchmark one training step of MLP-Mixer B/16 (224px) at batch size 2 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_mlp_mixer_b16_224_batch_size4(benchmark, net=mlp_mixer_b16_224, input_shape=[4, 3, 224, 224]):
    """Benchmark one training step of MLP-Mixer B/16 (224px) at batch size 4 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(
os.getenv("ONEFLOW_BENCHMARK_ALL") == "1",
"set ONEFLOW_BENCHMARK_ALL=1 to run this test",
)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_mlp_mixer_b16_224_batch_size8(
benchmark, net=mlp_mixer_b16_224, input_shape=[8, 3, 224, 224]
Expand Down
18 changes: 15 additions & 3 deletions benchmark/test_mnasnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,23 +10,35 @@ def test_mnasnet0_5_batch_size1(
model, x, optimizer = fetch_args(net, input_shape)
benchmark(run, model, x, optimizer)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_mnasnet0_5_batch_size2(benchmark, net=mnasnet0_5, input_shape=[2, 3, 224, 224]):
    """Benchmark one training step of MnasNet-0.5 at batch size 2 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_mnasnet0_5_batch_size4(benchmark, net=mnasnet0_5, input_shape=[4, 3, 224, 224]):
    """Benchmark one training step of MnasNet-0.5 at batch size 4 (opt-in run)."""
    args = fetch_args(net, input_shape)
    benchmark(run, *args)

@unittest.skipUnless(os.getenv("ONEFLOW_BENCHMARK_ALL") == "1", "set ONEFLOW_BENCHMARK_ALL=1 to run this test")

@unittest.skipUnless(
os.getenv("ONEFLOW_BENCHMARK_ALL") == "1",
"set ONEFLOW_BENCHMARK_ALL=1 to run this test",
)
@oneflow_benchmark.ci_settings(compare={"median": "5%"})
def test_mnasnet0_5_batch_size8(
benchmark, net=mnasnet0_5, input_shape=[8, 3, 224, 224]
Expand Down
Loading

0 comments on commit 572f333

Please sign in to comment.