remove pad custom op (#4801)
lucylq authored Aug 22, 2024
1 parent ea4a187 commit ce4917c
Showing 5 changed files with 0 additions and 137 deletions.
7 changes: 0 additions & 7 deletions examples/models/flamingo/export_preprocess_lib.py
@@ -15,10 +15,6 @@
from torch.export import Dim, ExportedProgram
from torchtune.models.clip.inference._transforms import _CLIPImageTransform

from .passes.replace_custom_ops_with_aten_ops_pass import (
ReplaceCustomOpsWithAtenOpsPass,
)


def get_example_inputs() -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
image = torch.ones(3, 800, 600)
@@ -59,7 +55,6 @@ def export_preprocess(
)

# Replace non-exportable ops with custom ops.
image_transform_model.pad = torch.ops.preprocess.pad.default
image_transform_model.tile_crop = torch.ops.preprocess.tile_crop.default

# Export.
@@ -80,8 +75,6 @@ def lower_to_executorch_preprocess(
edge_program = to_edge(
exported_program, compile_config=EdgeCompileConfig(_check_ir_validity=False)
)
# Replace custom ops with aten ops.
edge_program = edge_program.transform([ReplaceCustomOpsWithAtenOpsPass()])

et_program = edge_program.to_executorch(ExecutorchBackendConfig())
return et_program
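
The ReplaceCustomOpsWithAtenOpsPass imported and applied in the lines removed from this file lives in a file that this commit also deletes, and its body is not shown in the diff. As a rough sketch only: such a pass typically follows exir's ExportPass pattern, rewriting calls to the custom pad op into the equivalent aten op. The class body, op targets, and padding order below are assumptions for illustration, not the removed implementation; after to_edge, the matched targets may also appear as edge-dialect variants rather than torch.ops.* overloads.

import torch
from executorch.exir.pass_base import ExportPass


class ReplaceCustomOpsWithAtenOpsPass(ExportPass):
    # Illustrative sketch only; the deleted file's actual body is not shown in this diff.
    def call_operator(self, op, args, kwargs, meta):
        if op == torch.ops.preprocess.pad.default:
            image, padding = args
            # The custom op zero-pads the width by padding[1] on the right and the
            # height by padding[3] on the bottom; constant_pad_nd expresses the same
            # thing as (left, right, top, bottom) padding with a fill value.
            return super().call_operator(
                torch.ops.aten.constant_pad_nd.default,
                (image, [0, padding[1], 0, padding[3]], 0.0),
                {},
                meta,
            )
        return super().call_operator(op, args, kwargs, meta)
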
Empty file.

This file was deleted.

50 changes: 0 additions & 50 deletions examples/models/flamingo/passes/test_passes.py

This file was deleted.

49 changes: 0 additions & 49 deletions extension/llm/custom_ops/preprocess_custom_ops.py
@@ -7,61 +7,12 @@
# pyre-unsafe


from typing import List

import torch

from torch.library import impl, Library

preprocess_op_lib = Library("preprocess", "DEF")

# Register and define pad and out variant.
# Note: pad doesn't require an explicit meta kernel because
# CompositeExplicitAutograd automatically registers the implementation to meta,
# and meta kernels do not go through functionalization. The implementation
# does not export due to issues during functionalization.
# See: https://github.com/pytorch/pytorch/issues/120288
preprocess_op_lib.define("pad(Tensor image, SymInt[] padding) -> Tensor")


@impl(preprocess_op_lib, "pad", dispatch_key="CompositeExplicitAutograd")
def pad_impl(
image: torch.Tensor,
padding: List[int],
) -> torch.Tensor:
output = torch.empty(
[image.shape[0], image.shape[1] + padding[3], image.shape[2] + padding[1]],
dtype=image.dtype,
device=image.device,
requires_grad=False,
)
output = torch.fill(output, 0)
output.narrow(1, 0, image.shape[1]).narrow(2, 0, image.shape[2]).copy_(image)
return output


preprocess_op_lib.define(
"pad.out(Tensor image, SymInt[] padding, *, Tensor(a!) out) -> Tensor(a!)"
)


@impl(preprocess_op_lib, "pad.out", dispatch_key="CompositeExplicitAutograd")
def pad_out_impl(
image: torch.Tensor,
padding: List[int],
out: torch.Tensor,
) -> torch.Tensor:
out = torch.empty(
[image.shape[0], image.shape[1] + padding[3], image.shape[2] + padding[1]],
dtype=image.dtype,
device=image.device,
requires_grad=False,
)
out = torch.fill(out, 0)
out.narrow(1, 0, image.shape[1]).narrow(2, 0, image.shape[2]).copy_(image)
return out


# Register and define tile_crop and out variant.
preprocess_op_lib.define("tile_crop(Tensor input, int tile_size) -> Tensor")

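
For reference, the zero padding implemented by the removed pad and pad.out ops above matches the behavior of the standard aten pad. A minimal sketch, assuming the same index convention the removed code uses (only padding[1], the width increase, and padding[3], the height increase, are read); the example tensor sizes are arbitrary:

import torch

image = torch.ones(3, 4, 5)  # [channels, height, width]
padding = [0, 2, 0, 1]       # only padding[1] and padding[3] are read by the removed op
padded = torch.nn.functional.pad(image, (0, padding[1], 0, padding[3]), value=0.0)

assert padded.shape == (3, 4 + padding[3], 5 + padding[1])
assert torch.equal(padded[:, :4, :5], image)  # original image sits in the top-left corner
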
