diff --git a/.github/labeler.yml b/.github/action_configs/labeler.yml
similarity index 100%
rename from .github/labeler.yml
rename to .github/action_configs/labeler.yml
diff --git a/.github/workflows/api_changes_check.yml b/.github/workflows/api_changes_check.yml
new file mode 100644
index 00000000000..7fc62ebd78f
--- /dev/null
+++ b/.github/workflows/api_changes_check.yml
@@ -0,0 +1,45 @@
+name: API changes check
+on:
+ pull_request:
+ branches:
+ - develop
+
+env:
+ API_DOC_HTML_ROOT_RELATIVE_PATH: autoapi
+
+concurrency:
+ group: ci-${{ github.head_ref }}-${{ github.workflow }}
+ cancel-in-progress: true
+
+jobs:
+ call-build-api-doc:
+ uses: ./.github/workflows/build_html_doc.yml
+ compare-api-doc-with-develop:
+ needs: call-build-api-doc
+ runs-on: ubuntu-latest
+ steps:
+ - name: Download built HTML doc as artifact from previous step
+ uses: alehechka/download-tartifact@v2
+ with:
+ name: html_doc_artifact
+ - name: Checkout latest doc_pages branch tip
+ uses: actions/checkout@v3
+ with:
+ ref: doc_pages
+ path: previous_doc_state
+ - name: Get changed file names in API doc path
+ id: diff
+ run: |
+ cd html_build/html/$API_DOC_HTML_ROOT_RELATIVE_PATH
+ CHANGED_FILES=$(find . -name "*.html" -exec diff -qrBZ {} $GITHUB_WORKSPACE/previous_doc_state/$API_DOC_HTML_ROOT_RELATIVE_PATH/{} \;)
+ echo ${CHANGED_FILES}
+ CHANGED_FILES=$(echo $CHANGED_FILES | tr '\n' ' ')
+ echo "changed_files=${CHANGED_FILES}" >> $GITHUB_OUTPUT
+ - uses: actions-ecosystem/action-remove-labels@v1
+ if: ${{ !contains(steps.diff.outputs.changed_files, 'differ') }}
+ with:
+ labels: API
+ - uses: actions-ecosystem/action-add-labels@v1
+ if: ${{ contains(steps.diff.outputs.changed_files, 'differ') }}
+ with:
+ labels: API
diff --git a/.github/workflows/build_and_publish_doc.yml b/.github/workflows/build_and_publish_doc.yml
new file mode 100644
index 00000000000..7500a1b604c
--- /dev/null
+++ b/.github/workflows/build_and_publish_doc.yml
@@ -0,0 +1,41 @@
+name: Build complete docs and publish to GH Pages
+on:
+ push:
+ branches:
+ - develop
+
+env:
+ GH_PAGES_BRANCH: doc_pages
+
+concurrency:
+ group: ci-${{ github.ref }}-${{ github.workflow }}
+ cancel-in-progress: true
+
+permissions:
+ contents: write
+jobs:
+ call-build-html-doc:
+ uses: ./.github/workflows/build_html_doc.yml
+ call-build-schema-page:
+ uses: ./.github/workflows/build_schema_page.yml
+ publish:
+ needs: [call-build-html-doc, call-build-schema-page]
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout main repo # the github-pages-deploy-action seems to require this step
+ uses: actions/checkout@v3
+ - name: Download HTML doc build artifact
+ uses: alehechka/download-tartifact@v2
+ with:
+ name: html_doc_artifact
+ - name: Download schema doc build artifact
+ uses: alehechka/download-tartifact@v2
+ with:
+ name: schema_doc_artifact
+ path: html_build/html
+ - name: Publish built docs on Github Pages branch ${{ env.GH_PAGES_BRANCH }}
+ uses: JamesIves/github-pages-deploy-action@v4
+ with:
+ folder: html_build/html
+ token: ${{ secrets.PUSH_TO_GH_PAGES_BRANCH }}
+ branch: ${{ env.GH_PAGES_BRANCH }}
diff --git a/.github/workflows/build_html_doc.yml b/.github/workflows/build_html_doc.yml
new file mode 100644
index 00000000000..d07119ec631
--- /dev/null
+++ b/.github/workflows/build_html_doc.yml
@@ -0,0 +1,21 @@
+name: HTML documentation build
+on:
+ workflow_call:
+jobs:
+ build-html:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Install NNCF and doc requirements
+ run: |
+ pip install -e .
+ pip install -r docs/api/requirements.txt
+ - name: Build API docs
+ run: |
+ sphinx-build -M html docs/api/source html_build
+ - name: Upload built HTMLs as job artifact
+ uses: alehechka/upload-tartifact@v2
+ with:
+ name: html_doc_artifact
+ path: html_build/html
diff --git a/.github/workflows/build_schema_page.yml b/.github/workflows/build_schema_page.yml
index 395a6ea98c9..c14b697e642 100644
--- a/.github/workflows/build_schema_page.yml
+++ b/.github/workflows/build_schema_page.yml
@@ -1,31 +1,23 @@
name: Config schema HTML build
on:
- push:
- branches:
- - develop
- - test_for_doc_build_trigger
- paths:
- - nncf/config/**
-permissions:
- contents: write
+ workflow_call:
jobs:
- build-and-deploy:
- concurrency: ci-${{ github.ref }} # Recommended if you intend to make multiple deployments in quick succession.
+ build-config-schema-html:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- - name: Install and Build # This example project is built using npm and outputs the result to the 'build' folder. Replace with the commands required to build your project, or remove this step entirely if your site is pre-built.
+ - name: Install and Build
run: |
pip install json-schema-for-humans
pip install -e .
python -c 'import jstyleson; from nncf.config import NNCFConfig; jstyleson.dump(NNCFConfig.schema(), open("./schema.json", "w"), indent=2)'
- mkdir schema_html_build
- generate-schema-doc --deprecated-from-description schema.json schema_html_build/index.html
- - name: Deploy
- uses: JamesIves/github-pages-deploy-action@v4
+ mkdir schema
+ generate-schema-doc --deprecated-from-description schema.json schema/index.html
+
+ - name: Upload result as artifact
+ uses: alehechka/upload-tartifact@v2
with:
- folder: schema_html_build # The folder the action should deploy.
- token: ${{ secrets.PUSH_TO_GH_PAGES_BRANCH }}
- branch: doc_pages
+ name: schema_doc_artifact
+ path: schema
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
index 1b64d7304d2..c3eb8808fbd 100644
--- a/.github/workflows/labeler.yml
+++ b/.github/workflows/labeler.yml
@@ -2,7 +2,7 @@ name: "Pull Request Labeler"
on: [pull_request_target]
jobs:
- triage:
+ set-label:
permissions:
contents: read
pull-requests: write
@@ -11,5 +11,5 @@ jobs:
- uses: actions/labeler@v4
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
- configuration-path: '.github/labeler.yml'
+ configuration-path: '.github/action_configs/labeler.yml'
sync-labels: true
diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml
index b9882c135c1..94053830522 100644
--- a/.github/workflows/python-publish.yml
+++ b/.github/workflows/python-publish.yml
@@ -13,7 +13,7 @@
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
-name: Upload Python Package
+name: Publish release Python package to PyPI
on:
release:
diff --git a/docs/api/Makefile b/docs/api/Makefile
new file mode 100644
index 00000000000..d0c3cbf1020
--- /dev/null
+++ b/docs/api/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/api/requirements.txt b/docs/api/requirements.txt
new file mode 100644
index 00000000000..b1f0c1a53ec
--- /dev/null
+++ b/docs/api/requirements.txt
@@ -0,0 +1,3 @@
+Sphinx==6.1.3
+sphinx-autoapi==2.1.0
+sphinx-book-theme==1.0.1
\ No newline at end of file
diff --git a/docs/api/source/_autoapi_templates/index.rst b/docs/api/source/_autoapi_templates/index.rst
new file mode 100644
index 00000000000..97687746ed0
--- /dev/null
+++ b/docs/api/source/_autoapi_templates/index.rst
@@ -0,0 +1,13 @@
+NNCF API Reference
+==================
+
+.. toctree::
+ :titlesonly:
+
+ {% for page in pages %}
+ {% if page.top_level_object and page.display %}
+ {{ page.include_path }}
+ {% endif %}
+ {% endfor %}
+
+
diff --git a/docs/api/source/_autoapi_templates/python/attribute.rst b/docs/api/source/_autoapi_templates/python/attribute.rst
new file mode 100644
index 00000000000..ebaba555adc
--- /dev/null
+++ b/docs/api/source/_autoapi_templates/python/attribute.rst
@@ -0,0 +1 @@
+{% extends "python/data.rst" %}
diff --git a/docs/api/source/_autoapi_templates/python/class.rst b/docs/api/source/_autoapi_templates/python/class.rst
new file mode 100644
index 00000000000..df5edffb62e
--- /dev/null
+++ b/docs/api/source/_autoapi_templates/python/class.rst
@@ -0,0 +1,58 @@
+{% if obj.display %}
+.. py:{{ obj.type }}:: {{ obj.short_name }}{% if obj.args %}({{ obj.args }}){% endif %}
+{% for (args, return_annotation) in obj.overloads %}
+ {{ " " * (obj.type | length) }} {{ obj.short_name }}{% if args %}({{ args }}){% endif %}
+{% endfor %}
+
+
+ {% if obj.bases %}
+ {% if "show-inheritance" in autoapi_options %}
+ Bases: {% for base in obj.bases %}{{ base|link_objs }}{% if not loop.last %}, {% endif %}{% endfor %}
+ {% endif %}
+
+
+ {% if "show-inheritance-diagram" in autoapi_options and obj.bases != ["object"] %}
+ .. autoapi-inheritance-diagram:: {{ obj.obj["full_name"] }}
+ :parts: 1
+ {% if "private-members" in autoapi_options %}
+ :private-bases:
+ {% endif %}
+
+ {% endif %}
+ {% endif %}
+ {% if obj.docstring %}
+ {{ obj.docstring|indent(3) }}
+ {% endif %}
+ {% if "inherited-members" in autoapi_options %}
+ {% set visible_classes = obj.classes|selectattr("display")|list %}
+ {% else %}
+ {% set visible_classes = obj.classes|rejectattr("inherited")|selectattr("display")|list %}
+ {% endif %}
+ {% for klass in visible_classes %}
+ {{ klass.render()|indent(3) }}
+ {% endfor %}
+ {% if "inherited-members" in autoapi_options %}
+ {% set visible_properties = obj.properties|selectattr("display")|list %}
+ {% else %}
+ {% set visible_properties = obj.properties|rejectattr("inherited")|selectattr("display")|list %}
+ {% endif %}
+ {% for property in visible_properties %}
+ {{ property.render()|indent(3) }}
+ {% endfor %}
+ {% if "inherited-members" in autoapi_options %}
+ {% set visible_attributes = obj.attributes|selectattr("display")|list %}
+ {% else %}
+ {% set visible_attributes = obj.attributes|rejectattr("inherited")|selectattr("display")|list %}
+ {% endif %}
+ {% for attribute in visible_attributes %}
+ {{ attribute.render()|indent(3) }}
+ {% endfor %}
+ {% if "inherited-members" in autoapi_options %}
+ {% set visible_methods = obj.methods|selectattr("display")|list %}
+ {% else %}
+ {% set visible_methods = obj.methods|rejectattr("inherited")|selectattr("display")|list %}
+ {% endif %}
+ {% for method in visible_methods %}
+ {{ method.render()|indent(3) }}
+ {% endfor %}
+{% endif %}
diff --git a/docs/api/source/_autoapi_templates/python/data.rst b/docs/api/source/_autoapi_templates/python/data.rst
new file mode 100644
index 00000000000..3d12b2d0c7c
--- /dev/null
+++ b/docs/api/source/_autoapi_templates/python/data.rst
@@ -0,0 +1,37 @@
+{% if obj.display %}
+.. py:{{ obj.type }}:: {{ obj.name }}
+ {%- if obj.annotation is not none %}
+
+ :type: {%- if obj.annotation %} {{ obj.annotation }}{%- endif %}
+
+ {%- endif %}
+
+ {%- if obj.value is not none %}
+
+ :value: {% if obj.value is string and obj.value.splitlines()|count > 1 -%}
+ Multiline-String
+
+ .. raw:: html
+
+ Show Value
+
+ .. code-block:: python
+
+ """{{ obj.value|indent(width=8,blank=true) }}"""
+
+ .. raw:: html
+
+
+
+ {%- else -%}
+ {%- if obj.value is string -%}
+ {{ "%r" % obj.value|string|truncate(100) }}
+ {%- else -%}
+ {{ obj.value|string|truncate(100) }}
+ {%- endif -%}
+ {%- endif %}
+ {%- endif %}
+
+
+ {{ obj.docstring|indent(3) }}
+{% endif %}
diff --git a/docs/api/source/_autoapi_templates/python/exception.rst b/docs/api/source/_autoapi_templates/python/exception.rst
new file mode 100644
index 00000000000..92f3d38fd5f
--- /dev/null
+++ b/docs/api/source/_autoapi_templates/python/exception.rst
@@ -0,0 +1 @@
+{% extends "python/class.rst" %}
diff --git a/docs/api/source/_autoapi_templates/python/function.rst b/docs/api/source/_autoapi_templates/python/function.rst
new file mode 100644
index 00000000000..b00d5c24454
--- /dev/null
+++ b/docs/api/source/_autoapi_templates/python/function.rst
@@ -0,0 +1,15 @@
+{% if obj.display %}
+.. py:function:: {{ obj.short_name }}({{ obj.args }}){% if obj.return_annotation is not none %} -> {{ obj.return_annotation }}{% endif %}
+
+{% for (args, return_annotation) in obj.overloads %}
+ {{ obj.short_name }}({{ args }}){% if return_annotation is not none %} -> {{ return_annotation }}{% endif %}
+
+{% endfor %}
+ {% for property in obj.properties %}
+ :{{ property }}:
+ {% endfor %}
+
+ {% if obj.docstring %}
+ {{ obj.docstring|indent(3) }}
+ {% endif %}
+{% endif %}
diff --git a/docs/api/source/_autoapi_templates/python/method.rst b/docs/api/source/_autoapi_templates/python/method.rst
new file mode 100644
index 00000000000..723cb7bbe54
--- /dev/null
+++ b/docs/api/source/_autoapi_templates/python/method.rst
@@ -0,0 +1,19 @@
+{%- if obj.display %}
+.. py:method:: {{ obj.short_name }}({{ obj.args }}){% if obj.return_annotation is not none %} -> {{ obj.return_annotation }}{% endif %}
+
+{% for (args, return_annotation) in obj.overloads %}
+ {{ obj.short_name }}({{ args }}){% if return_annotation is not none %} -> {{ return_annotation }}{% endif %}
+
+{% endfor %}
+ {% if obj.properties %}
+ {% for property in obj.properties %}
+ :{{ property }}:
+ {% endfor %}
+
+ {% else %}
+
+ {% endif %}
+ {% if obj.docstring %}
+ {{ obj.docstring|indent(3) }}
+ {% endif %}
+{% endif %}
diff --git a/docs/api/source/_autoapi_templates/python/module.rst b/docs/api/source/_autoapi_templates/python/module.rst
new file mode 100644
index 00000000000..a95cfb6c0e3
--- /dev/null
+++ b/docs/api/source/_autoapi_templates/python/module.rst
@@ -0,0 +1,112 @@
+{% if not obj.display %}
+:orphan:
+
+{% endif %}
+:py:mod:`{{ obj.name }}`
+=========={{ "=" * obj.name|length }}
+
+.. py:module:: {{ obj.name }}
+
+{% if obj.docstring %}
+.. autoapi-nested-parse::
+
+ {{ obj.docstring|indent(3) }}
+
+{% endif %}
+
+{% block subpackages %}
+{% set visible_subpackages = obj.subpackages|selectattr("display")|list %}
+{% if visible_subpackages %}
+Subpackages
+-----------
+.. toctree::
+ :titlesonly:
+ :maxdepth: 3
+
+{% for subpackage in visible_subpackages %}
+ {{ subpackage.short_name }}/index.rst
+{% endfor %}
+
+
+{% endif %}
+{% endblock %}
+{% block submodules %}
+{% set visible_submodules = obj.submodules|selectattr("display")|list %}
+{% if visible_submodules %}
+Submodules
+----------
+.. toctree::
+ :titlesonly:
+ :maxdepth: 1
+
+{% for submodule in visible_submodules %}
+ {{ submodule.short_name }}/index.rst
+{% endfor %}
+
+
+{% endif %}
+{% endblock %}
+{% block content %}
+{% if obj.all is not none %}
+{% set visible_children = obj.children|selectattr("short_name", "in", obj.all)|list %}
+{% elif obj.type is equalto("package") %}
+{% set visible_children = obj.children|selectattr("display")|list %}
+{% else %}
+{% set visible_children = obj.children|selectattr("display")|rejectattr("imported")|list %}
+{% endif %}
+{% if visible_children %}
+
+{% set visible_classes = visible_children|selectattr("type", "equalto", "class")|list %}
+{% set visible_functions = visible_children|selectattr("type", "equalto", "function")|list %}
+{% set visible_attributes = visible_children|selectattr("type", "equalto", "data")|list %}
+{% if "show-module-summary" in autoapi_options and (visible_classes or visible_functions) %}
+{% block classes scoped %}
+{% if visible_classes %}
+Classes
+~~~~~~~
+
+.. autoapisummary::
+
+{% for klass in visible_classes %}
+ {{ klass.id }}
+{% endfor %}
+
+
+{% endif %}
+{% endblock %}
+
+{% block functions scoped %}
+{% if visible_functions %}
+Functions
+~~~~~~~~~
+
+.. autoapisummary::
+
+{% for function in visible_functions %}
+ {{ function.id }}
+{% endfor %}
+
+
+{% endif %}
+{% endblock %}
+
+{% block attributes scoped %}
+{% if visible_attributes %}
+Attributes
+~~~~~~~~~~
+
+.. autoapisummary::
+
+{% for attribute in visible_attributes %}
+ {{ attribute.id }}
+{% endfor %}
+
+
+{% endif %}
+{% endblock %}
+{% endif %}
+{% for obj_item in visible_children %}
+{{ obj_item.render()|indent(0) }}
+{% endfor %}
+{% endif %}
+{% endblock %}
diff --git a/docs/api/source/_autoapi_templates/python/package.rst b/docs/api/source/_autoapi_templates/python/package.rst
new file mode 100644
index 00000000000..fb9a64965e9
--- /dev/null
+++ b/docs/api/source/_autoapi_templates/python/package.rst
@@ -0,0 +1 @@
+{% extends "python/module.rst" %}
diff --git a/docs/api/source/_autoapi_templates/python/property.rst b/docs/api/source/_autoapi_templates/python/property.rst
new file mode 100644
index 00000000000..70af24236f1
--- /dev/null
+++ b/docs/api/source/_autoapi_templates/python/property.rst
@@ -0,0 +1,15 @@
+{%- if obj.display %}
+.. py:property:: {{ obj.short_name }}
+ {% if obj.annotation %}
+ :type: {{ obj.annotation }}
+ {% endif %}
+ {% if obj.properties %}
+ {% for property in obj.properties %}
+ :{{ property }}:
+ {% endfor %}
+ {% endif %}
+
+ {% if obj.docstring %}
+ {{ obj.docstring|indent(3) }}
+ {% endif %}
+{% endif %}
diff --git a/docs/api/source/conf.py b/docs/api/source/conf.py
new file mode 100644
index 00000000000..ccb44a06659
--- /dev/null
+++ b/docs/api/source/conf.py
@@ -0,0 +1,136 @@
+import importlib
+import inspect
+import os
+import pkgutil
+import sys
+from typing import Dict
+from typing import List
+
+from sphinx.ext.autodoc import mock
+
+
+sys.path.insert(0, os.path.abspath('../../..'))
+
+project = 'nncf'
+copyright = '2023, Intel Corporation'
+author = 'Intel Corporation'
+release = 'v2.4.0'
+
+
+extensions = ['autoapi.extension']
+
+autoapi_dirs = ['../../../nncf']
+autoapi_options = ['members', 'show-inheritance',
+ 'show-module-summary', 'special-members', 'imported-members']
+
+autoapi_template_dir = '_autoapi_templates'
+autoapi_keep_files = True
+autoapi_add_toctree_entry = False
+
+html_theme_options = {
+ 'navigation_depth': -1,
+}
+
+exclude_patterns = []
+
+
+def collect_api_entities() -> List[str]:
+ """
+ Collects the fully qualified names of symbols in NNCF package that contain a special attribute (set via
+ `nncf.common.api_marker.api` decorator) marking them as API entities.
+ :return: A list of fully qualified names of API symbols.
+ """
+ modules = {}
+ skipped_modules = {} # type: Dict[str, str]
+ import nncf
+ for importer, modname, ispkg in pkgutil.walk_packages(path=nncf.__path__,
+ prefix=nncf.__name__+'.',
+ onerror=lambda x: None):
+ try:
+ modules[modname] = importlib.import_module(modname)
+ except Exception as e:
+ skipped_modules[modname] = str(e)
+
+ from nncf.common.api_marker import api
+ api_fqns = dict()
+ aliased_fqns = {} # type: Dict[str, str]
+ for modname, module in modules.items():
+ print(f"{modname}")
+ for obj_name, obj in inspect.getmembers(module):
+ objects_module = getattr(obj, '__module__', None)
+ if objects_module == modname:
+ if inspect.isclass(obj) or inspect.isfunction(obj):
+ if hasattr(obj, api.API_MARKER_ATTR):
+ marked_object_name = obj._nncf_api_marker
+ # Check the actual name of the originally marked object
+ # so that the classes derived from base API classes don't
+ # all automatically end up in API
+ if marked_object_name != obj.__name__:
+ continue
+ fqn = f"{modname}.{obj_name}"
+ if hasattr(obj, api.CANONICAL_ALIAS_ATTR):
+ canonical_import_name = getattr(obj, api.CANONICAL_ALIAS_ATTR)
+ aliased_fqns[fqn] = canonical_import_name
+ if canonical_import_name == fqn:
+ print(f"\t{obj_name}")
+ else:
+ print(f"\t{obj_name} -> {canonical_import_name}")
+ api_fqns[fqn] = True
+
+ print()
+ skipped_str = '\n'.join([f"{k}: {v}" for k, v in skipped_modules.items()])
+ print(f"Skipped: {skipped_str}\n")
+ for fqn, canonical_alias in aliased_fqns.items():
+ try:
+ module_name, _, function_name = canonical_alias.rpartition('.')
+ getattr(importlib.import_module(module_name), function_name)
+ except (ImportError, AttributeError) as e:
+ print(
+ f"API entity with canonical_alias={canonical_alias} not available for import as specified!\n"
+ f"Adjust the __init__.py files so that the symbol is available for import as {canonical_alias}.")
+ raise e
+ api_fqns.pop(fqn)
+ api_fqns[canonical_alias] = True
+
+ print("API entities:")
+ for api_fqn in api_fqns:
+ print(api_fqn)
+ return list(api_fqns.keys())
+
+
+with mock(['torch', 'torchvision', 'onnx', 'onnxruntime', 'openvino', 'tensorflow', 'tensorflow_addons']):
+ api_fqns = collect_api_entities()
+
+module_fqns = set()
+
+for fqn in api_fqns:
+ path_elements = fqn.split('.')
+ for i in range(1, len(path_elements)):
+ intermediate_module_path = '.'.join(path_elements[:i])
+ module_fqns.add(intermediate_module_path)
+
+
+def skip_non_api(app, what, name, obj, skip, options):
+ # AutoAPI-allowed callback to skip certain elements from generated documentation.
+ # We use it to only allow API entities in the documentation (otherwise AutoAPI would generate docs for every
+ # non-private symbol available in NNCF)
+ if what in ["module", "package"] and name in module_fqns:
+ print(f"skip_non_api: keeping module {name}")
+ return skip
+ if what in ["method", "attribute"]:
+ class_name = name.rpartition('.')[0]
+ if class_name in api_fqns:
+ return skip
+ if name not in api_fqns:
+ skip = True
+ else:
+ print(f"skip_non_api: keeping API entity {name}")
+ return skip
+
+
+def setup(sphinx):
+ sphinx.connect("autoapi-skip-member", skip_non_api)
+
+
+html_theme = 'sphinx_book_theme'
+html_static_path = ['_static']
diff --git a/docs/api/source/index.rst b/docs/api/source/index.rst
new file mode 100644
index 00000000000..4d5159f8282
--- /dev/null
+++ b/docs/api/source/index.rst
@@ -0,0 +1,10 @@
+NNCF documentation
+================================
+
+.. toctree::
+ :caption: Contents:
+ :maxdepth: 3
+
+ API Reference
+ JSON configuration file schema<./schema#http://>
+
diff --git a/nncf/__init__.py b/nncf/__init__.py
index b904ddf36fc..79a6b67d36d 100644
--- a/nncf/__init__.py
+++ b/nncf/__init__.py
@@ -60,3 +60,5 @@
else:
nncf_logger.info(f"NNCF initialized successfully. Supported frameworks detected: "
f"{', '.join([name for name, loaded in _LOADED_FRAMEWORKS.items() if loaded])}")
+
+
diff --git a/nncf/common/accuracy_aware_training/training_loop.py b/nncf/common/accuracy_aware_training/training_loop.py
index 6a640a5a2f8..53a1f63a555 100644
--- a/nncf/common/accuracy_aware_training/training_loop.py
+++ b/nncf/common/accuracy_aware_training/training_loop.py
@@ -20,6 +20,7 @@
from scipy.interpolate import interp1d
from nncf.api.compression import CompressionAlgorithmController
+from nncf.common.api_marker import api
from nncf.common.composite_compression import CompositeCompressionAlgorithmController
from nncf.common.logging import nncf_logger
from nncf.common.utils.registry import Registry
@@ -168,6 +169,7 @@ def _accuracy_criterion_satisfied(self):
return accuracy_budget >= 0 and self.runner.is_model_fully_compressed(self.compression_controller)
+@api(canonical_alias="nncf.tensorflow.EarlyExitCompressionTrainingLoop")
class EarlyExitCompressionTrainingLoop(BaseEarlyExitCompressionTrainingLoop):
"""
Adaptive compression training loop allows an accuracy-aware training process
@@ -191,6 +193,7 @@ def __init__(self,
self.runner = runner_factory.create_training_loop()
+@api(canonical_alias="nncf.tensorflow.AdaptiveCompressionTrainingLoop")
class AdaptiveCompressionTrainingLoop(BaseEarlyExitCompressionTrainingLoop):
"""
Adaptive compression training loop allows an accuracy-aware training process whereby
diff --git a/nncf/common/api_marker.py b/nncf/common/api_marker.py
new file mode 100644
index 00000000000..c8939da9d89
--- /dev/null
+++ b/nncf/common/api_marker.py
@@ -0,0 +1,33 @@
+"""
+ Copyright (c) 2023 Intel Corporation
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+
+class api:
+ API_MARKER_ATTR = "_nncf_api_marker"
+ CANONICAL_ALIAS_ATTR = "_nncf_canonical_alias"
+
+ def __init__(self, canonical_alias: str = None):
+ self._canonical_alias = canonical_alias
+
+ def __call__(self, obj):
+ # The value of the marker will be useful in determining
+ # whether we are handling a base class or a derived one.
+ setattr(obj, api.API_MARKER_ATTR, obj.__name__)
+ if self._canonical_alias is not None:
+ setattr(obj, api.CANONICAL_ALIAS_ATTR, self._canonical_alias)
+ return obj
+
+
+def is_api(obj) -> bool:
+ return hasattr(obj, api.API_MARKER_ATTR)
+
diff --git a/nncf/common/initialization/dataloader.py b/nncf/common/initialization/dataloader.py
index a91b8ca6336..c7011b8086c 100644
--- a/nncf/common/initialization/dataloader.py
+++ b/nncf/common/initialization/dataloader.py
@@ -13,7 +13,10 @@
from abc import ABC, abstractmethod
+from nncf.common.api_marker import api
+
+@api()
class NNCFDataLoader(ABC):
"""
Wraps a custom data source.
diff --git a/nncf/common/quantization/structs.py b/nncf/common/quantization/structs.py
index 61f284b21e1..4988055688a 100644
--- a/nncf/common/quantization/structs.py
+++ b/nncf/common/quantization/structs.py
@@ -15,6 +15,7 @@
from enum import Enum
from typing import Dict, List, Optional, Any
+from nncf.common.api_marker import api
from nncf.common.graph import NNCFNode
from nncf.common.graph import NNCFNodeName
from nncf.config.schemata.defaults import QUANTIZATION_BITS
@@ -308,6 +309,7 @@ class UnifiedScaleType(Enum):
UNIFY_ALWAYS = 1
+@api(canonical_alias="nncf.QuantizationPreset")
class QuantizationPreset(Enum):
PERFORMANCE = 'performance'
MIXED = 'mixed'
diff --git a/nncf/config/config.py b/nncf/config/config.py
index ff22768de24..1cf960f6a41 100644
--- a/nncf/config/config.py
+++ b/nncf/config/config.py
@@ -20,6 +20,7 @@
import jsonschema
import jstyleson as json
+from nncf.common.api_marker import api
from nncf.common.logging import nncf_logger
from nncf.common.utils.os import safe_open
from nncf.config.definitions import SCHEMA_VISUALIZATION_URL
@@ -29,6 +30,7 @@
from nncf.config.structures import NNCFExtraConfigStruct
+@api(canonical_alias="nncf.NNCFConfig")
class NNCFConfig(dict):
"""A regular dictionary object extended with some utility functions."""
diff --git a/nncf/config/structures.py b/nncf/config/structures.py
index a7df6c9569d..319c60a2cb4 100644
--- a/nncf/config/structures.py
+++ b/nncf/config/structures.py
@@ -13,6 +13,7 @@
from typing import Optional, Callable
+from nncf.common.api_marker import api
from nncf.common.initialization.dataloader import NNCFDataLoader
@@ -27,6 +28,7 @@ def get_id(cls) -> str:
raise NotImplementedError
+@api()
class QuantizationRangeInitArgs(NNCFExtraConfigStruct):
"""
Stores additional arguments for quantization range initialization algorithms.
@@ -59,6 +61,7 @@ def get_id(cls) -> str:
return 'quantization_range_init_args'
+@api()
class BNAdaptationInitArgs(NNCFExtraConfigStruct):
"""
Stores additional arguments for batchnorm statistics adaptation algorithm.
@@ -91,6 +94,7 @@ def get_id(cls) -> str:
return 'bn_adaptation_init_args'
+@api()
class ModelEvaluationArgs(NNCFExtraConfigStruct):
def __init__(self,
eval_fn: Callable):
diff --git a/nncf/data/dataset.py b/nncf/data/dataset.py
index 8b7ea3f3960..1d7ccffe1c4 100644
--- a/nncf/data/dataset.py
+++ b/nncf/data/dataset.py
@@ -18,11 +18,13 @@
from typing import Generic
from typing import TypeVar
+from nncf.common.api_marker import api
DataItem = TypeVar('DataItem')
ModelInput = TypeVar('ModelInput')
+@api(canonical_alias="nncf.Dataset")
class Dataset(Generic[DataItem, ModelInput]):
"""
The `nncf.Dataset` class defines the interface by which compression algorithms
diff --git a/nncf/onnx/engine.py b/nncf/onnx/engine.py
index 555783a6bc7..9e76d8bc3fa 100644
--- a/nncf/onnx/engine.py
+++ b/nncf/onnx/engine.py
@@ -18,7 +18,6 @@
from nncf.common.engine import Engine
-
class ONNXEngine(Engine):
"""
Engine for ONNX backend using ONNXRuntime to infer the model.
diff --git a/nncf/parameters.py b/nncf/parameters.py
index cb4a90cb9b2..083fd9f43bf 100644
--- a/nncf/parameters.py
+++ b/nncf/parameters.py
@@ -13,6 +13,10 @@
from enum import Enum
+from nncf.common.api_marker import api
+
+
+@api(canonical_alias="nncf.TargetDevice")
class TargetDevice(Enum):
"""
Describes the target device the specificity of which will be taken
@@ -32,6 +36,7 @@ class TargetDevice(Enum):
CPU_SPR = 'CPU_SPR'
+@api(canonical_alias="nncf.ModelType")
class ModelType(Enum):
"""
Describes the model type the specificity of which will be taken into
diff --git a/nncf/quantization/quantize.py b/nncf/quantization/quantize.py
index d6f687df4af..a0e3a3748d0 100644
--- a/nncf/quantization/quantize.py
+++ b/nncf/quantization/quantize.py
@@ -17,6 +17,7 @@
from typing import Any
from nncf.api.compression import TModel
+from nncf.common.api_marker import api
from nncf.common.quantization.structs import QuantizationPreset
from nncf.common.utils.backend import BackendType
from nncf.common.utils.backend import get_backend
@@ -26,6 +27,7 @@
from nncf.parameters import TargetDevice
+@api(canonical_alias="nncf.quantize")
def quantize(model: TModel,
calibration_dataset: Dataset,
preset: QuantizationPreset = QuantizationPreset.PERFORMANCE,
@@ -83,6 +85,7 @@ def quantize(model: TModel,
raise RuntimeError(f'Unsupported type of backend: {backend}')
+@api(canonical_alias="nncf.quantize_with_accuracy_control")
def quantize_with_accuracy_control(model: TModel,
calibration_dataset: Dataset,
validation_dataset: Dataset,
diff --git a/nncf/scopes.py b/nncf/scopes.py
index feff6869c35..160fcbd4491 100644
--- a/nncf/scopes.py
+++ b/nncf/scopes.py
@@ -16,10 +16,12 @@
import re
from typing import List, Optional
-from nncf.common.graph.graph import NNCFGraph
+from nncf.common.api_marker import api
from nncf.common.logging import nncf_logger
+from nncf.common.graph.graph import NNCFGraph
+@api(canonical_alias="nncf.IgnoredScope")
@dataclass
class IgnoredScope:
"""
@@ -111,11 +113,11 @@ def get_ignored_node_names_from_ignored_scope(ignored_scope: IgnoredScope,
if ignored_node_name in node_names:
matched_by_names.append(ignored_node_name)
if strict and len(ignored_scope.names) != len(matched_by_names):
- skipped_names = set(ignored_scope.names) - set(matched_by_names)
- raise RuntimeError(f'Ignored nodes with name {list(skipped_names)}'
- ' were not found in the NNCFGraph. ' + error_msg)
+ skipped_names = set(ignored_scope.names) - set(matched_by_names)
+ raise RuntimeError(f'Ignored nodes with name {list(skipped_names)}'
+ ' were not found in the NNCFGraph. ' + error_msg)
nncf_logger.info(f'{len(matched_by_names)}'
- ' ignored nodes was found by name in the NNCFGraph')
+ ' ignored nodes was found by name in the NNCFGraph')
matched_by_patterns = []
if ignored_scope.patterns:
diff --git a/nncf/tensorflow/__init__.py b/nncf/tensorflow/__init__.py
index 5f98c807144..fa8793b0417 100644
--- a/nncf/tensorflow/__init__.py
+++ b/nncf/tensorflow/__init__.py
@@ -18,14 +18,20 @@
import tensorflow
from pkg_resources import parse_version
-tensorflow_version = parse_version(tensorflow.__version__).base_version
+try:
+ _tf_version = tensorflow.__version__
+ tensorflow_version = parse_version(_tf_version).base_version
+except Exception:
+ nncf_logger.debug("Could not parse tensorflow version")
+ _tf_version = '0.0.0'
+ tensorflow_version = parse_version(_tf_version).base_version
tensorflow_version_major, tensorflow_version_minor = tuple(map(int, tensorflow_version.split('.')))[:2]
if not tensorflow_version.startswith(BKC_TF_VERSION[:-2]):
- warn_bkc_version_mismatch("tensorflow", BKC_TF_VERSION, tensorflow.__version__)
+ warn_bkc_version_mismatch("tensorflow", BKC_TF_VERSION, _tf_version)
elif not (tensorflow_version_major == 2 and 4 <= tensorflow_version_minor <= 11):
raise RuntimeError(
f'NNCF only supports 2.4.0 <= tensorflow <= 2.11.*, '
- f'while current tensorflow version is {tensorflow.__version__}')
+ f'while current tensorflow version is {_tf_version}')
from nncf.tensorflow.helpers import create_compressed_model
diff --git a/nncf/tensorflow/helpers/callback_creation.py b/nncf/tensorflow/helpers/callback_creation.py
index 9488bd037d3..ad1280aedcd 100644
--- a/nncf/tensorflow/helpers/callback_creation.py
+++ b/nncf/tensorflow/helpers/callback_creation.py
@@ -10,7 +10,7 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
+from nncf.common.api_marker import api
from nncf.common.composite_compression import CompositeCompressionAlgorithmController
from nncf.tensorflow.pruning.base_algorithm import BasePruningAlgoController
from nncf.tensorflow.pruning.callbacks import PruningStatisticsCallback
@@ -19,6 +19,7 @@
from nncf.tensorflow.sparsity.base_algorithm import BaseSparsityController
+@api(canonical_alias="nncf.tensorflow.create_compression_callbacks")
def create_compression_callbacks(compression_ctrl, log_tensorboard=True, log_text=True, log_dir=None):
compression_controllers = compression_ctrl.child_ctrls \
if isinstance(compression_ctrl, CompositeCompressionAlgorithmController) \
diff --git a/nncf/tensorflow/helpers/model_creation.py b/nncf/tensorflow/helpers/model_creation.py
index 88813d8edd7..4ab46cb849f 100644
--- a/nncf/tensorflow/helpers/model_creation.py
+++ b/nncf/tensorflow/helpers/model_creation.py
@@ -21,6 +21,7 @@
from nncf import NNCFConfig
from nncf.api.compression import CompressionAlgorithmController
+from nncf.common.api_marker import api
from nncf.common.compression import BaseCompressionAlgorithmController as BaseController
from nncf.config.extractors import extract_algorithm_names
from nncf.config.telemetry_extractors import CompressionStartedFromConfig
@@ -58,7 +59,7 @@ def create_compression_algorithm_builder(config: NNCFConfig,
return TFCompositeCompressionAlgorithmBuilder(config, should_init)
-
+@api(canonical_alias="nncf.tensorflow.create_compressed_model")
@tracked_function(NNCF_TF_CATEGORY, [CompressionStartedFromConfig(argname="config"), ])
def create_compressed_model(model: tf.keras.Model,
config: NNCFConfig,
diff --git a/nncf/tensorflow/initialization.py b/nncf/tensorflow/initialization.py
index bdd67fe5671..499b97a795d 100644
--- a/nncf/tensorflow/initialization.py
+++ b/nncf/tensorflow/initialization.py
@@ -13,6 +13,7 @@
import tensorflow as tf
+from nncf.common.api_marker import api
from nncf.common.initialization.dataloader import NNCFDataLoader
from nncf.config import NNCFConfig
from nncf.config.structures import BNAdaptationInitArgs
@@ -38,6 +39,7 @@ def __iter__(self):
return iter(self._data_loader)
+@api(canonical_alias="nncf.tensorflow.register_default_init_args")
def register_default_init_args(nncf_config: NNCFConfig,
data_loader: tf.data.Dataset,
batch_size: int,
diff --git a/nncf/tensorflow/pruning/filter_pruning/algorithm.py b/nncf/tensorflow/pruning/filter_pruning/algorithm.py
index a0f8da72d76..a03fd24e8da 100644
--- a/nncf/tensorflow/pruning/filter_pruning/algorithm.py
+++ b/nncf/tensorflow/pruning/filter_pruning/algorithm.py
@@ -20,6 +20,7 @@
from nncf import NNCFConfig
from nncf.api.compression import CompressionLoss
from nncf.api.compression import CompressionStage
+from nncf.common.api_marker import api
from nncf.common.graph import NNCFGraph
from nncf.common.initialization.batchnorm_adaptation import BatchnormAdaptationAlgorithm
from nncf.common.pruning.clusterization import Cluster
@@ -97,6 +98,7 @@ def _get_types_of_grouping_ops(self) -> List[str]:
return TFElementwisePruningOp.get_all_op_aliases()
+@api()
@ADAPTIVE_COMPRESSION_CONTROLLERS.register('tf_filter_pruning')
class FilterPruningController(BasePruningAlgoController):
"""
diff --git a/nncf/tensorflow/quantization/algorithm.py b/nncf/tensorflow/quantization/algorithm.py
index 8819bb5a3ac..475aa4409b2 100644
--- a/nncf/tensorflow/quantization/algorithm.py
+++ b/nncf/tensorflow/quantization/algorithm.py
@@ -22,6 +22,7 @@
from nncf.api.compression import CompressionLoss
from nncf.api.compression import CompressionScheduler
from nncf.api.compression import CompressionStage
+from nncf.common.api_marker import api
from nncf.common.compression import BaseCompressionAlgorithmController
from nncf.common.graph import INPUT_NOOP_METATYPES
from nncf.common.graph import OUTPUT_NOOP_METATYPES
@@ -680,6 +681,7 @@ def _get_quantizer_operation_name(self, layer_name, weight_attr_name):
return f'{layer_name}_{weight_attr_name}_quantizer'
+@api()
class QuantizationController(BaseCompressionAlgorithmController):
def __init__(self, target_model, config, op_names: List[str]):
super().__init__(target_model)
diff --git a/nncf/tensorflow/sparsity/magnitude/algorithm.py b/nncf/tensorflow/sparsity/magnitude/algorithm.py
index 5354168b41b..b2c6a3e772f 100644
--- a/nncf/tensorflow/sparsity/magnitude/algorithm.py
+++ b/nncf/tensorflow/sparsity/magnitude/algorithm.py
@@ -20,6 +20,7 @@
from nncf.api.compression import CompressionScheduler
from nncf.api.compression import CompressionStage
from nncf.common.accuracy_aware_training.training_loop import ADAPTIVE_COMPRESSION_CONTROLLERS
+from nncf.common.api_marker import api
from nncf.common.graph import OUTPUT_NOOP_METATYPES
from nncf.common.graph.transformations.commands import TransformationPriority
from nncf.common.initialization.batchnorm_adaptation import BatchnormAdaptationAlgorithm
@@ -132,6 +133,7 @@ def initialize(self, model: tf.keras.Model) -> None:
pass
+@api()
@ADAPTIVE_COMPRESSION_CONTROLLERS.register('tf_magnitude_sparsity')
class MagnitudeSparsityController(BaseSparsityController):
"""
diff --git a/nncf/tensorflow/sparsity/rb/algorithm.py b/nncf/tensorflow/sparsity/rb/algorithm.py
index 737ec108365..80689be691f 100644
--- a/nncf/tensorflow/sparsity/rb/algorithm.py
+++ b/nncf/tensorflow/sparsity/rb/algorithm.py
@@ -19,6 +19,7 @@
from nncf import NNCFConfig
from nncf.common.accuracy_aware_training.training_loop import ADAPTIVE_COMPRESSION_CONTROLLERS
+from nncf.common.api_marker import api
from nncf.common.graph.transformations.commands import TransformationPriority
from nncf.common.schedulers import StubCompressionScheduler
from nncf.common.scopes import check_scopes_in_graph
@@ -107,6 +108,7 @@ def initialize(self, model: tf.keras.Model) -> None:
pass
+@api()
@ADAPTIVE_COMPRESSION_CONTROLLERS.register('tf_rb_sparsity')
class RBSparsityController(BaseSparsityController):
def __init__(self, target_model, config: NNCFConfig, op_names: List[str]):
diff --git a/nncf/torch/__init__.py b/nncf/torch/__init__.py
index d05eef2d30d..67e2cbcfaf4 100644
--- a/nncf/torch/__init__.py
+++ b/nncf/torch/__init__.py
@@ -17,7 +17,15 @@
import torch
from pkg_resources import parse_version
-torch_version = parse_version(torch.__version__).base_version
+
+try:
+ _torch_version = torch.__version__
+ torch_version = parse_version(_torch_version).base_version
+except Exception:
+ nncf_logger.debug("Could not parse torch version")
+ _torch_version = '0.0.0'
+ torch_version = parse_version(_torch_version).base_version
+
if parse_version(BKC_TORCH_VERSION).base_version != torch_version:
warn_bkc_version_mismatch("torch", BKC_TORCH_VERSION, torch.__version__)
diff --git a/nncf/torch/automl/__init__.py b/nncf/torch/automl/__init__.py
new file mode 100644
index 00000000000..8727b935935
--- /dev/null
+++ b/nncf/torch/automl/__init__.py
@@ -0,0 +1,12 @@
+"""
+ Copyright (c) 2023 Intel Corporation
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
diff --git a/nncf/torch/automl/agent/__init__.py b/nncf/torch/automl/agent/__init__.py
new file mode 100644
index 00000000000..8727b935935
--- /dev/null
+++ b/nncf/torch/automl/agent/__init__.py
@@ -0,0 +1,12 @@
+"""
+ Copyright (c) 2023 Intel Corporation
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
diff --git a/nncf/torch/automl/agent/ddpg/__init__.py b/nncf/torch/automl/agent/ddpg/__init__.py
new file mode 100644
index 00000000000..8727b935935
--- /dev/null
+++ b/nncf/torch/automl/agent/ddpg/__init__.py
@@ -0,0 +1,12 @@
+"""
+ Copyright (c) 2023 Intel Corporation
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
diff --git a/nncf/torch/automl/environment/__init__.py b/nncf/torch/automl/environment/__init__.py
new file mode 100644
index 00000000000..8727b935935
--- /dev/null
+++ b/nncf/torch/automl/environment/__init__.py
@@ -0,0 +1,12 @@
+"""
+ Copyright (c) 2023 Intel Corporation
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
diff --git a/nncf/torch/checkpoint_loading.py b/nncf/torch/checkpoint_loading.py
index 6ca6a4095e9..be563f1b300 100644
--- a/nncf/torch/checkpoint_loading.py
+++ b/nncf/torch/checkpoint_loading.py
@@ -16,10 +16,11 @@
import torch
+from nncf.common.api_marker import api
from nncf.common.logging import nncf_logger
from nncf.common.deprecation import warning_deprecated
-
+@api(canonical_alias="nncf.torch.load_state")
def load_state(model: torch.nn.Module, state_dict_to_load: dict, is_resume: bool = False,
keys_to_ignore: List[str] = None) -> int:
"""
diff --git a/nncf/torch/dynamic_graph/context.py b/nncf/torch/dynamic_graph/context.py
index c5069a8fa4e..e3ab86a87b9 100644
--- a/nncf/torch/dynamic_graph/context.py
+++ b/nncf/torch/dynamic_graph/context.py
@@ -22,6 +22,7 @@
import torch
+from nncf.common.api_marker import api
from nncf.common.graph.layer_attributes import BaseLayerAttributes
from nncf.common.utils.debug import is_debug
from nncf.torch.dynamic_graph.graph import DynamicGraph
@@ -429,6 +430,7 @@ def set_current_context(c: TracingContext):
_CURRENT_CONTEXT.context = c
+@api(canonical_alias="nncf.torch.no_nncf_trace")
@contextmanager
def no_nncf_trace():
ctx = get_current_context()
@@ -440,6 +442,7 @@ def no_nncf_trace():
yield
+@api(canonical_alias="nncf.torch.forward_nncf_trace")
@contextmanager
def forward_nncf_trace():
ctx = get_current_context()
@@ -455,6 +458,7 @@ def get_current_context() -> TracingContext:
return _CURRENT_CONTEXT.context
+@api(canonical_alias="nncf.torch.disable_tracing")
def disable_tracing(method):
"""
Patch a method so that it will be executed within no_nncf_trace context
diff --git a/nncf/torch/dynamic_graph/io_handling.py b/nncf/torch/dynamic_graph/io_handling.py
index 51e50b1815d..16da857978a 100644
--- a/nncf/torch/dynamic_graph/io_handling.py
+++ b/nncf/torch/dynamic_graph/io_handling.py
@@ -5,6 +5,7 @@
import torch
+from nncf.common.api_marker import api
from nncf.common.graph.definitions import MODEL_INPUT_OP_NAME
from nncf.common.graph.definitions import MODEL_OUTPUT_OP_NAME
from nncf.torch.dynamic_graph.patch_pytorch import register_operator
@@ -16,11 +17,13 @@
from nncf.torch.dynamic_graph.context import forward_nncf_trace
+@api(canonical_alias="nncf.torch.nncf_model_input")
@register_operator(name=MODEL_INPUT_OP_NAME)
def nncf_model_input(tensor: 'torch.Tensor'):
return tensor
+@api(canonical_alias="nncf.torch.nncf_model_output")
@register_operator(name=MODEL_OUTPUT_OP_NAME)
def nncf_model_output(tensor: 'torch.Tensor'):
return tensor
diff --git a/nncf/torch/dynamic_graph/patch_pytorch.py b/nncf/torch/dynamic_graph/patch_pytorch.py
index cace2681f7a..e85e8d085f3 100644
--- a/nncf/torch/dynamic_graph/patch_pytorch.py
+++ b/nncf/torch/dynamic_graph/patch_pytorch.py
@@ -24,6 +24,7 @@
from torch.nn.parallel import DistributedDataParallel
from nncf import nncf_logger
+from nncf.common.api_marker import api
from nncf.torch.dynamic_graph.structs import NamespaceTarget
from nncf.torch.dynamic_graph.trace_tensor import TracedTensor
from nncf.torch.dynamic_graph.wrappers import ignore_scope
@@ -96,6 +97,7 @@ class MagicFunctionsToPatch:
}
+@api(canonical_alias="nncf.torch.register_operator")
def register_operator(name=None):
def wrap(operator):
op_name = name
diff --git a/nncf/torch/extensions/__init__.py b/nncf/torch/extensions/__init__.py
index b7c40e866d5..7a014ade93a 100644
--- a/nncf/torch/extensions/__init__.py
+++ b/nncf/torch/extensions/__init__.py
@@ -8,6 +8,7 @@
from torch.utils.cpp_extension import _get_build_directory
+from nncf.common.api_marker import api
from nncf.common.logging.logger import extension_is_loading_info_log
from nncf.common.utils.registry import Registry
from nncf.common.logging import nncf_logger
@@ -80,10 +81,12 @@ def _force_build_extensions(ext_type: ExtensionsType):
class_type.load()
+@api(canonical_alias="nncf.torch.force_build_cpu_extensions")
def force_build_cpu_extensions():
_force_build_extensions(ExtensionsType.CPU)
+@api(canonical_alias="nncf.torch.force_build_cuda_extensions")
def force_build_cuda_extensions():
_force_build_extensions(ExtensionsType.CUDA)
diff --git a/nncf/torch/initialization.py b/nncf/torch/initialization.py
index 58e3c25d128..f673581e5c4 100644
--- a/nncf/torch/initialization.py
+++ b/nncf/torch/initialization.py
@@ -11,6 +11,7 @@
from torch.nn.modules.loss import _Loss
from torch.utils.data import DataLoader
+from nncf.common.api_marker import api
from nncf.common.initialization.dataloader import NNCFDataLoader
from nncf.common.logging import nncf_logger
from nncf.common.logging.progress_bar import ProgressBar
@@ -224,6 +225,7 @@ def default_criterion_fn(outputs: Any, target: Any, criterion: Any) -> torch.Ten
return criterion(outputs, target)
+@api(canonical_alias="nncf.torch.register_default_init_args")
def register_default_init_args(nncf_config: 'NNCFConfig',
train_loader: torch.utils.data.DataLoader,
criterion: _Loss = None,
diff --git a/nncf/torch/knowledge_distillation/algo.py b/nncf/torch/knowledge_distillation/algo.py
index 46fd309be7d..6940a9b6672 100644
--- a/nncf/torch/knowledge_distillation/algo.py
+++ b/nncf/torch/knowledge_distillation/algo.py
@@ -15,6 +15,7 @@
from torch import nn
+from nncf.common.api_marker import api
from nncf.common.schedulers import BaseCompressionScheduler
from nncf.common.statistics import NNCFStatistics
from nncf.config.schemata.defaults import KNOWLEDGE_DISTILLATION_SCALE
@@ -52,6 +53,7 @@ def initialize(self, model: NNCFNetwork) -> None:
pass
+@api()
class KnowledgeDistillationController(PTCompressionAlgorithmController):
def __init__(self, target_model: NNCFNetwork, original_model: nn.Module, kd_type: str, scale: float,
temperature: float):
diff --git a/nncf/torch/layers.py b/nncf/torch/layers.py
index c938ad6467d..8f5a1c9423c 100644
--- a/nncf/torch/layers.py
+++ b/nncf/torch/layers.py
@@ -25,6 +25,7 @@
from torch.nn.utils.weight_norm import WeightNorm
from nncf import nncf_logger
+from nncf.common.api_marker import api
from nncf.torch.dynamic_graph.context import forward_nncf_trace
from nncf.torch.utils import no_jit_trace
from nncf.torch.checkpoint_loading import OPTIONAL_PARAMETERS_REGISTRY
@@ -385,6 +386,7 @@ def from_module(module):
NNCF_WRAPPED_USER_MODULES_DICT = {}
+@api(canonical_alias="nncf.torch.register_module")
def register_module(*quantizable_field_names: str, ignored_algorithms: list = None):
# quantizable_field_names will work for `weight` attributes only. Should later extend to registering
# customly named attributes if it becomes necessary
diff --git a/nncf/torch/model_creation.py b/nncf/torch/model_creation.py
index 62af55c5a23..1e36de2259b 100644
--- a/nncf/torch/model_creation.py
+++ b/nncf/torch/model_creation.py
@@ -23,6 +23,7 @@
from torch.nn import Module
from nncf.api.compression import CompressionAlgorithmController
+from nncf.common.api_marker import api
from nncf.common.compression import BaseCompressionAlgorithmController as BaseController
from nncf.common.logging import nncf_logger
from nncf.common.utils.debug import set_debug_log_dir
@@ -44,6 +45,7 @@
from nncf.torch.utils import training_mode_switcher
+@api(canonical_alias="nncf.torch.create_compressed_model")
@tracked_function(NNCF_PT_CATEGORY, [CompressionStartedFromConfig(argname="config"), ])
def create_compressed_model(model: Module,
config: NNCFConfig,
@@ -140,29 +143,31 @@ def create_nncf_network(model: torch.nn.Module,
"""
The main function used to produce a model ready for adding compression from an original PyTorch
model and a configuration object.
- dummy_forward_fn
+
:param model: The original model. Should have its parameters already loaded from a checkpoint or another
- source.
+ source.
:param config: A configuration object used to determine the exact compression modifications to be applied
- to the model
+ to the model
:param dummy_forward_fn: if supplied, will be used instead of a *forward* function call to build
- the internal graph representation via tracing. Specifying this is useful when the original training pipeline
- has special formats of data loader output or has additional *forward* arguments other than input tensors.
- Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
- to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to nncf.nncf_model_input
- functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these
- calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is
- specified, then wrap_inputs_fn also must be specified.
+ the internal graph representation via tracing. Specifying this is useful when the original training pipeline
+ has special formats of data loader output or has additional *forward* arguments other than input tensors.
+ Otherwise, the *forward* call of the model during graph tracing will be made with mock tensors according
+ to the shape specified in the config object. The dummy_forward_fn code MUST contain calls to
+ nncf.nncf_model_input
+ functions made with each compressed model input tensor in the underlying model's args/kwargs tuple, and these
+ calls should be exactly the same as in the wrap_inputs_fn function code (see below); if dummy_forward_fn is
+ specified, then wrap_inputs_fn also must be specified.
:param wrap_inputs_fn: if supplied, will be used on the module's input arguments during a regular, non-dummy
- forward call before passing the inputs to the underlying compressed model. This is required if the model's input
- tensors that are important for compression are not supplied as arguments to the model's forward call directly, but
- instead are located in a container (such as list), and the model receives the container as an argument.
- wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying
- model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among the
- supplied model's args and kwargs that is important for compression (e.g. quantization) with an nncf.nncf_model_input
- function, which is a no-operation function and marks the tensors as inputs to be traced by NNCF in the internal
- graph representation. Output is the tuple of (args, kwargs), where args and kwargs are the same as were supplied in
- input, but each tensor in the original input. Must be specified if dummy_forward_fn is specified.
+ forward call before passing the inputs to the underlying compressed model. This is required if the model's input
+ tensors that are important for compression are not supplied as arguments to the model's forward call directly,
+ but instead are located in a container (such as list), and the model receives the container as an argument.
+ wrap_inputs_fn should take as input two arguments - the tuple of positional arguments to the underlying
+ model's forward call, and a dict of keyword arguments to the same. The function should wrap each tensor among
+ the supplied model's args and kwargs that is important for compression (e.g. quantization) with an
+ nncf.nncf_model_input function, which is a no-operation function and marks the tensors as inputs to be traced
+ by NNCF in the internal graph representation. Output is the tuple of (args, kwargs), where args and kwargs are
+ the same as were supplied in input, but each tensor in the original input. Must be specified if
+ dummy_forward_fn is specified.
:param wrap_outputs_fn: if supplied, will be used on the module's output during a regular, non-dummy forward call.
:return: A model wrapped by NNCFNetwork, which is ready for adding compression. """
diff --git a/nncf/torch/pruning/filter_pruning/algo.py b/nncf/torch/pruning/filter_pruning/algo.py
index 6449a55b798..be2301f6cfd 100644
--- a/nncf/torch/pruning/filter_pruning/algo.py
+++ b/nncf/torch/pruning/filter_pruning/algo.py
@@ -26,6 +26,7 @@
from nncf.api.compression import CompressionLoss
from nncf.api.compression import CompressionStage
from nncf.common.accuracy_aware_training.training_loop import ADAPTIVE_COMPRESSION_CONTROLLERS
+from nncf.common.api_marker import api
from nncf.common.graph import NNCFNode
from nncf.common.graph import NNCFNodeName
from nncf.common.initialization.batchnorm_adaptation import BatchnormAdaptationAlgorithm
@@ -122,6 +123,7 @@ def get_types_of_grouping_ops(self) -> List[str]:
return PTElementwisePruningOp.get_all_op_aliases()
+@api()
@ADAPTIVE_COMPRESSION_CONTROLLERS.register('pt_filter_pruning')
class FilterPruningController(BasePruningAlgoController):
def __init__(self, target_model: NNCFNetwork,
diff --git a/nncf/torch/quantization/algo.py b/nncf/torch/quantization/algo.py
index 90b1c80dc73..3bed0f70805 100644
--- a/nncf/torch/quantization/algo.py
+++ b/nncf/torch/quantization/algo.py
@@ -35,6 +35,7 @@
from nncf.api.compression import CompressionLoss
from nncf.api.compression import CompressionScheduler
from nncf.api.compression import CompressionStage
+from nncf.common.api_marker import api
from nncf.common.graph import NNCFGraph
from nncf.common.graph import NNCFNode
from nncf.common.graph import NNCFNodeName
@@ -1252,6 +1253,7 @@ def init_range(self):
raise NotImplementedError
+@api()
class QuantizationController(QuantizationControllerBase):
def __init__(self, target_model: NNCFNetwork,
config: NNCFConfig,
diff --git a/nncf/torch/sparsity/base_algo.py b/nncf/torch/sparsity/base_algo.py
index 12f45455b96..d668c237d06 100644
--- a/nncf/torch/sparsity/base_algo.py
+++ b/nncf/torch/sparsity/base_algo.py
@@ -17,6 +17,7 @@
from nncf.api.compression import CompressionLoss
from nncf.api.compression import CompressionScheduler
from nncf.api.compression import CompressionStage
+from nncf.common.api_marker import api
from nncf.common.graph import NNCFNode
from nncf.common.graph import NNCFNodeName
from nncf.common.graph.transformations.commands import TargetType
@@ -93,6 +94,7 @@ def initialize(self, model: NNCFNetwork) -> None:
pass
+@api()
class BaseSparsityAlgoController(PTCompressionAlgorithmController, SparsityController):
def __init__(self, target_model: NNCFNetwork, sparsified_module_info: List[SparseModuleInfo]):
super().__init__(target_model)
diff --git a/nncf/torch/sparsity/const/algo.py b/nncf/torch/sparsity/const/algo.py
index 46f28d725c3..020a3c2ac2d 100644
--- a/nncf/torch/sparsity/const/algo.py
+++ b/nncf/torch/sparsity/const/algo.py
@@ -12,6 +12,7 @@
"""
from typing import Tuple
+from nncf.common.api_marker import api
from nncf.common.graph import NNCFNode
from nncf.common.sparsity.statistics import ConstSparsityStatistics
from nncf.common.statistics import NNCFStatistics
@@ -35,6 +36,7 @@ def _are_frozen_layers_allowed(self) -> Tuple[bool, str]:
return True, 'Frozen layers are allowed for const sparsity'
+@api()
class ConstSparsityController(BaseSparsityAlgoController):
def freeze(self):
pass
diff --git a/nncf/torch/sparsity/magnitude/algo.py b/nncf/torch/sparsity/magnitude/algo.py
index 01936432eaa..b88552e3695 100644
--- a/nncf/torch/sparsity/magnitude/algo.py
+++ b/nncf/torch/sparsity/magnitude/algo.py
@@ -18,6 +18,7 @@
from nncf import NNCFConfig
from nncf.api.compression import CompressionStage
from nncf.common.accuracy_aware_training.training_loop import ADAPTIVE_COMPRESSION_CONTROLLERS
+from nncf.common.api_marker import api
from nncf.common.graph import NNCFNode
from nncf.common.initialization.batchnorm_adaptation import BatchnormAdaptationAlgorithm
from nncf.common.schedulers import StubCompressionScheduler
@@ -51,6 +52,7 @@ def _build_controller(self, model: NNCFNetwork) -> PTCompressionAlgorithmControl
return MagnitudeSparsityController(model, self._sparsified_module_info, self.config)
+@api()
@ADAPTIVE_COMPRESSION_CONTROLLERS.register('pt_magnitude_sparsity')
class MagnitudeSparsityController(BaseSparsityAlgoController):
def __init__(self, target_model: NNCFNetwork, sparsified_module_info: List[SparseModuleInfo],
diff --git a/nncf/torch/sparsity/rb/algo.py b/nncf/torch/sparsity/rb/algo.py
index e9be7b76999..c7b21e46350 100644
--- a/nncf/torch/sparsity/rb/algo.py
+++ b/nncf/torch/sparsity/rb/algo.py
@@ -17,6 +17,7 @@
import torch.distributed as dist
from nncf import NNCFConfig
+from nncf.common.api_marker import api
from nncf.config.extractors import extract_algo_specific_config
from nncf.config.schemata.defaults import SPARSITY_INIT
from nncf.config.schemata.defaults import SPARSITY_LEVEL_SETTING_MODE
@@ -48,6 +49,7 @@ def _build_controller(self, model: NNCFNetwork) -> PTCompressionAlgorithmControl
return RBSparsityController(model, self._sparsified_module_info, self.config)
+@api()
@ADAPTIVE_COMPRESSION_CONTROLLERS.register('pt_rb_sparsity')
class RBSparsityController(BaseSparsityAlgoController):
def __init__(self, target_model: NNCFNetwork, sparsified_module_info: List[SparseModuleInfo],
diff --git a/nncf/torch/statistics/__init__.py b/nncf/torch/statistics/__init__.py
new file mode 100644
index 00000000000..8727b935935
--- /dev/null
+++ b/nncf/torch/statistics/__init__.py
@@ -0,0 +1,12 @@
+"""
+ Copyright (c) 2023 Intel Corporation
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
diff --git a/nncf/torch/structures.py b/nncf/torch/structures.py
index 616e1518c89..5cacf5d3d98 100644
--- a/nncf/torch/structures.py
+++ b/nncf/torch/structures.py
@@ -17,9 +17,11 @@
from torch.nn.modules.loss import _Loss
from torch.utils.data import DataLoader
+from nncf.common.api_marker import api
from nncf.config.structures import NNCFExtraConfigStruct
+@api()
class QuantizationPrecisionInitArgs(NNCFExtraConfigStruct):
"""
Stores arguments for initialization of quantization's bitwidth.
@@ -55,6 +57,7 @@ def get_id(cls) -> str:
return "quantization_precision_init_args"
+@api()
class AutoQPrecisionInitArgs(NNCFExtraConfigStruct):
"""
:param data_loader: 'data_loader' - provides an iterable over the given dataset. Instance of
@@ -79,6 +82,7 @@ def get_id(cls) -> str:
return "autoq_precision_init_args"
+@api()
class LeGRInitArgs(NNCFExtraConfigStruct):
"""
Stores arguments for learning global ranking in pruning algorithm.
@@ -112,6 +116,7 @@ def get_id(cls) -> str:
return "legr_init_args"
+@api()
class DistributedCallbacksArgs(NNCFExtraConfigStruct):
"""
A pair of callbacks that is needed for distributed training of the model: wrapping model with wrapping_callback for
@@ -132,6 +137,7 @@ def get_id(cls) -> str:
return "distributed_callbacks_args"
+@api()
class ExecutionParameters:
"""
Parameters that are necessary for distributed training of the model.