fixing imports
Sara Adkins committed May 28, 2024
1 parent d83fb2e commit 25d8b1d
Showing 22 changed files with 73 additions and 23 deletions.
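
This commit makes two kinds of changes across the 22 files: imports of the GPTQ and quantization modifiers move from the quantization_legacy package to the new sparseml.modifiers.quantization package, and single-line imports that grew too long are wrapped in parentheses. A sketch of the path change, using the GPTQModifier import that appears in the hunks below:

    # before this commit
    from sparseml.modifiers.quantization_legacy.gptq.base import GPTQModifier

    # after this commit
    from sparseml.modifiers.quantization.gptq.base import GPTQModifier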

src/sparseml/modifiers/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -18,5 +18,5 @@
 from .logarithmic_equalization import *
 from .obcq import *
 from .pruning import *
-from .quantization_legacy import *
+from .quantization import *
 from .smoothquant import *
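
Because the package re-exports with wildcards, callers that import from sparseml.modifiers now resolve through the new quantization package. A minimal sketch, assuming GPTQModifier and QuantizationModifier are carried up through the wildcard chain (the __init__ hunks below suggest this, but it is not verified beyond this diff):

    # hedged: relies on the wildcard re-export chain set up in this commit
    from sparseml.modifiers import GPTQModifier, QuantizationModifier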

src/sparseml/modifiers/quantization/__init__.py (3 changes: 2 additions & 1 deletion)
@@ -14,4 +14,5 @@
 
 # flake8: noqa
 
-from .base import *
+from .gptq import *
+from .quantization import *
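
The resulting layout of the new package, reconstructed from the hunks in this commit (the renamed base module shows up below as "File renamed without changes"):

    src/sparseml/modifiers/quantization/
        __init__.py       # from .gptq import *; from .quantization import *
        gptq/             # GPTQModifier, GPTQWrapper, GPTQModifierPyTorch
        quantization/
            __init__.py   # added in this commit: from .base import *
            base.py       # QuantizationModifier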

@@ -19,8 +19,8 @@
 
 from sparseml.core.model import ModifiableModel
 from sparseml.core.state import State
-from sparseml.modifiers.quantization_legacy.gptq.base import GPTQModifier
-from sparseml.modifiers.quantization_legacy.gptq.utils.gptq_wrapper import GPTQWrapper
+from sparseml.modifiers.quantization.gptq.base import GPTQModifier
+from sparseml.modifiers.quantization.gptq.utils.gptq_wrapper import GPTQWrapper
 from sparseml.modifiers.utils.layer_compressor import LayerCompressor
 from sparseml.modifiers.utils.pytorch_helpers import run_calibration_forward
 from sparseml.utils.fsdp.context import fix_fsdp_module_name
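
After the move, the GPTQ classes import from the new package and, judging by the test-import hunks below, the legacy path no longer carries them. A quick sanity check, assuming only the package path changed and the class names stayed the same (which is all this hunk shows):

    from sparseml.modifiers.quantization.gptq.base import GPTQModifier
    from sparseml.modifiers.quantization.gptq.utils.gptq_wrapper import GPTQWrapper

    # the defining module should now reflect the new package path
    print(GPTQModifier.__module__)  # sparseml.modifiers.quantization.gptq.base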

src/sparseml/modifiers/quantization/quantization/__init__.py (17 changes: 17 additions & 0 deletions)
@@ -0,0 +1,17 @@
+# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# flake8: noqa
+
+from .base import *
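
With this __init__.py in place, the wildcard import exposes everything public in base.py at the subpackage level as well. Both forms below should name the same class, assuming base.py defines QuantizationModifier (the PyTorch hunk further down imports it from exactly that module):

    from sparseml.modifiers.quantization.quantization import QuantizationModifier
    from sparseml.modifiers.quantization.quantization.base import QuantizationModifier as QM

    assert QuantizationModifier is QM  # same object via either path
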
File renamed without changes.

@@ -23,7 +23,7 @@
     set_module_for_calibration,
 )
 from sparseml.core import Event, EventType, State
-from sparseml.modifiers.quantization.base import QuantizationModifier
+from sparseml.modifiers.quantization.quantization.base import QuantizationModifier
 from sparseml.modifiers.utils.pytorch_helpers import run_calibration_forward
 
 

@@ -15,7 +15,9 @@
 import logging
 import os
 
-from sparseml.modifiers.quantization_legacy.modification.registry import ModificationRegistry
+from sparseml.modifiers.quantization_legacy.modification.registry import (
+    ModificationRegistry,
+)
 
 
 _LOGGER = logging.getLogger(__name__)
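
Every remaining hunk of this shape is the same mechanical fix: a single-line import that exceeds the line-length limit is rewritten in parenthesized form, which Python parses identically. The import above is 93 characters on one line, over both the 79-character PEP 8 limit and black's 88-character default (which checker enforces it here is an assumption; the diff does not show the tool configuration):

    # 93 characters: fails the length check
    from sparseml.modifiers.quantization_legacy.modification.registry import ModificationRegistry

    # identical semantics, each physical line within the limit
    from sparseml.modifiers.quantization_legacy.modification.registry import (
        ModificationRegistry,
    )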

@@ -30,7 +30,9 @@
 except Exception:
     torch_quantization = None
 
-from sparseml.modifiers.quantization_legacy.utils.fake_quant_wrapper import FakeQuantizeWrapper
+from sparseml.modifiers.quantization_legacy.utils.fake_quant_wrapper import (
+    FakeQuantizeWrapper,
+)
 
 
 __all__ = [

src/sparseml/modifiers/quantization_legacy/utils/quantize.py (8 changes: 6 additions & 2 deletions)
@@ -26,13 +26,17 @@
     FUSED_MODULE_NAMES,
     NON_QUANTIZABLE_MODULE_NAMES,
 )
-from sparseml.modifiers.quantization_legacy.utils.fake_quant_wrapper import FakeQuantizeWrapper
+from sparseml.modifiers.quantization_legacy.utils.fake_quant_wrapper import (
+    FakeQuantizeWrapper,
+)
 from sparseml.modifiers.quantization_legacy.utils.helpers import (
     QATWrapper,
     configure_module_default_qconfigs,
     prepare_embeddings_qat,
 )
-from sparseml.modifiers.quantization_legacy.utils.quantization_scheme import QuantizationScheme
+from sparseml.modifiers.quantization_legacy.utils.quantization_scheme import (
+    QuantizationScheme,
+)
 from sparseml.pytorch.utils import get_layer
 from sparseml.utils.fsdp.context import fix_fsdp_module_name
 

@@ -25,8 +25,12 @@
 from torch import nn
 from transformers.models.bert.modeling_bert import BertSelfAttention
 
-from sparseml.modifiers.quantization_legacy.modification.modification_objects import QATMatMul
-from sparseml.modifiers.quantization_legacy.modification.registry import ModificationRegistry
+from sparseml.modifiers.quantization_legacy.modification.modification_objects import (
+    QATMatMul,
+)
+from sparseml.modifiers.quantization_legacy.modification.registry import (
+    ModificationRegistry,
+)
 from sparseml.pytorch.utils.helpers import swap_modules
 from sparseml.transformers.sparsification.modification.base import (
     check_transformers_version,

@@ -27,8 +27,12 @@
     MultiHeadSelfAttention,
 )
 
-from sparseml.modifiers.quantization_legacy.modification.modification_objects import QATMatMul
-from sparseml.modifiers.quantization_legacy.modification.registry import ModificationRegistry
+from sparseml.modifiers.quantization_legacy.modification.modification_objects import (
+    QATMatMul,
+)
+from sparseml.modifiers.quantization_legacy.modification.registry import (
+    ModificationRegistry,
+)
 from sparseml.pytorch.utils.helpers import swap_modules
 from sparseml.transformers.sparsification.modification.base import (
     check_transformers_version,

@@ -36,7 +36,9 @@
     QuantizableIdentity,
     QuantizableMatMul,
 )
-from sparseml.modifiers.quantization_legacy.modification.registry import ModificationRegistry
+from sparseml.modifiers.quantization_legacy.modification.registry import (
+    ModificationRegistry,
+)
 from sparseml.pytorch.utils.helpers import swap_modules
 from sparseml.transformers.sparsification.modification.base import (
     check_transformers_version,

@@ -36,7 +36,9 @@
     QuantizableIdentity,
     QuantizableMatMul,
 )
-from sparseml.modifiers.quantization_legacy.modification.registry import ModificationRegistry
+from sparseml.modifiers.quantization_legacy.modification.registry import (
+    ModificationRegistry,
+)
 from sparseml.pytorch.utils.helpers import swap_modules
 from sparseml.transformers.sparsification.modification.base import (
     check_transformers_version,

@@ -20,8 +20,12 @@
 from torch import nn
 from transformers.models.mobilebert.modeling_mobilebert import MobileBertEmbeddings
 
-from sparseml.modifiers.quantization_legacy.modification.modification_objects import QATLinear
-from sparseml.modifiers.quantization_legacy.modification.registry import ModificationRegistry
+from sparseml.modifiers.quantization_legacy.modification.modification_objects import (
+    QATLinear,
+)
+from sparseml.modifiers.quantization_legacy.modification.registry import (
+    ModificationRegistry,
+)
 from sparseml.pytorch.utils.helpers import swap_modules
 from sparseml.transformers.sparsification.modification.base import (
     check_transformers_version,

@@ -27,7 +27,9 @@
     QuantizableBatchMatmul,
     QuantizableIdentity,
 )
-from sparseml.modifiers.quantization_legacy.modification.registry import ModificationRegistry
+from sparseml.modifiers.quantization_legacy.modification.registry import (
+    ModificationRegistry,
+)
 from sparseml.pytorch.utils.helpers import swap_modules
 from sparseml.transformers.sparsification.modification.base import (
     check_transformers_version,

@@ -18,7 +18,9 @@
 import pytest
 
 from sparseml.modifiers.quantization_legacy.modification import modify_model
-from sparseml.modifiers.quantization_legacy.modification.registry import ModificationRegistry
+from sparseml.modifiers.quantization_legacy.modification.registry import (
+    ModificationRegistry,
+)
 from sparsezoo.utils.registry import _ALIAS_REGISTRY, _REGISTRY, standardize_lookup_name
 
 

@@ -20,9 +20,11 @@
 from sparseml.core.framework import Framework
 from sparseml.core.model import ModifiableModel
 from sparseml.modifiers.obcq.pytorch import SparseGPTModifierPyTorch
-from sparseml.modifiers.quantization_legacy.gptq.pytorch import GPTQModifierPyTorch
-from sparseml.modifiers.quantization_legacy.pytorch import LegacyQuantizationModifierPyTorch
-from sparseml.modifiers.quantization.base import QuantizationModifier
+from sparseml.modifiers.quantization.gptq.pytorch import GPTQModifierPyTorch
+from sparseml.modifiers.quantization.quantization.base import QuantizationModifier
+from sparseml.modifiers.quantization_legacy.pytorch import (
+    LegacyQuantizationModifierPyTorch,
+)
 from tests.sparseml.modifiers.conf import LifecyleTestingHarness, setup_modifier_factory
 from tests.sparseml.pytorch.helpers import LinearNet
 from tests.testing_utils import requires_torch
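
This test module now pulls GPTQModifierPyTorch and QuantizationModifier from the new package while the legacy PyTorch modifier keeps its renamed quantization_legacy path, so both stacks stay importable side by side. A hedged illustration of the distinction (not a test from this commit):

    from sparseml.modifiers.quantization.quantization.base import QuantizationModifier
    from sparseml.modifiers.quantization_legacy.pytorch import (
        LegacyQuantizationModifierPyTorch,
    )

    # the new modifier and the legacy PyTorch implementation are separate classes
    assert QuantizationModifier is not LegacyQuantizationModifierPyTorch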

@@ -21,7 +21,9 @@
 from sparseml.core.event import Event, EventType
 from sparseml.core.factory import ModifierFactory
 from sparseml.core.framework import Framework
-from sparseml.modifiers.quantization_legacy.pytorch import LegacyQuantizationModifierPyTorch
+from sparseml.modifiers.quantization_legacy.pytorch import (
+    LegacyQuantizationModifierPyTorch,
+)
 from sparseml.pytorch.sparsification.quantization.quantize import (
     is_qat_helper_module,
     is_quantizable_module,
