diff --git a/.github/workflows/push-pot.yml b/.github/workflows/push-pot.yml
new file mode 100644
index 0000000000..1787996ee1
--- /dev/null
+++ b/.github/workflows/push-pot.yml
@@ -0,0 +1,35 @@
+name: Push POTs
+on:
+ push:
+ branches:
+ - '3.0'
+ - 'lastochka42/update-po'
+permissions:
+ contents: write
+jobs:
+ generate-pot:
+ runs-on: ubuntu-latest
+ container: tarantool/doc-builder:fat-4.3
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Generate Portable Object Templates
+ run: |
+ cmake .
+ make update-po
+
+ - name: Commit generated pots
+ run: |
+ git config --global --add safe.directory /__w/doc/doc
+ git config --global user.name 'TarantoolBot'
+ git config --global user.email 'tarantoolbot@mail.ru'
+
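+ # Skip the commit when the working tree is clean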
+ if [[ $(git status) =~ .*"nothing to commit".* ]]; then
+ echo "status=nothing-to-commit"
+ exit 0
+ fi
+
+ git add locale/
+ git commit -m "updated locale"
+ git push origin lastochka42/update-po
+
diff --git a/.gitignore b/.gitignore
index 6625ca321a..006e72cd83 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,6 +29,7 @@ webhooks/.env
locale/*
!locale/ru
+!locale/en
# redundant folders created by sphinx
diff --git a/.gitmodules b/.gitmodules
index cd300b8328..3955ade5ce 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,9 +1,3 @@
-[submodule "modules/cartridge"]
- path = modules/cartridge
- url = https://github.com/tarantool/cartridge.git
-[submodule "modules/cartridge-cli"]
- path = modules/cartridge-cli
- url = https://github.com/tarantool/cartridge-cli.git
[submodule "modules/metrics"]
path = modules/metrics
url = https://github.com/tarantool/metrics.git
diff --git a/build_submodules.sh b/build_submodules.sh
index a083ebfc47..e19e7258a0 100755
--- a/build_submodules.sh
+++ b/build_submodules.sh
@@ -12,50 +12,6 @@ po_dest="${project_root}/locale/ru/LC_MESSAGES"
cp README.rst doc/contributing/docs/_includes/README.rst
-# Cartridge
-cartridge_root="${project_root}/modules/cartridge"
-
-# Build Cartridge to extract docs
-cd "${cartridge_root}" || exit
-CMAKE_DUMMY_WEBUI=true tarantoolctl rocks make
-
-# Copy Cartridge docs, including diagrams and images
-cartridge_rst_src="${cartridge_root}/build.luarocks/build.rst"
-cartridge_rst_dest="${project_root}/doc/book/cartridge"
-cd "${cartridge_rst_src}" || exit
-mkdir -p "${cartridge_rst_dest}"
-find . -iregex '.*\.\(rst\|png\|puml\|svg\)$' -exec cp -r --parents {} "${cartridge_rst_dest}" \;
-
-# Copy translation templates
-cartridge_pot_src="${cartridge_root}/build.luarocks/build.rst/locale"
-cartridge_pot_dest="${project_root}/locale/book/cartridge"
-cd "${cartridge_pot_src}" || exit
-mkdir -p "${cartridge_pot_dest}"
-find . -name '*.pot' -exec cp -rv --parents {} "${cartridge_pot_dest}" \;
-
-# Copy translations
-cartridge_po_src="${cartridge_root}/build.luarocks/build.rst/locale/ru/LC_MESSAGES"
-cartridge_po_dest="${po_dest}/book/cartridge"
-cd "${cartridge_po_src}" || exit
-mkdir -p "${cartridge_po_dest}"
-find . -name '*.po' -exec cp -rv --parents {} "${cartridge_po_dest}" \;
-
-
-# Cartridge CLI
-cartridge_cli_root="${project_root}/modules/cartridge-cli/doc"
-cartridge_cli_dest="${cartridge_rst_dest}/cartridge_cli"
-cartridge_cli_po_dest="${po_dest}/book/cartridge/cartridge_cli"
-
-# Copy Cartridge CLI docs, including diagrams and images
-mkdir -p "${cartridge_cli_dest}"
-cd ${cartridge_cli_root} || exit
-find . -iregex '.*\.\(rst\|png\|puml\|svg\)$' -exec cp -rv --parents {} "${cartridge_cli_dest}" \;
-
-# Copy translations
-mkdir -p "${cartridge_cli_po_dest}"
-cd "${cartridge_cli_root}/locale/ru/LC_MESSAGES/doc/" || exit
-find . -name '*.po' -exec cp -rv --parents {} "${cartridge_cli_po_dest}" \;
-
# Monitoring
monitoring_root="${project_root}/modules/metrics/doc/monitoring"
monitoring_dest="${project_root}/doc/book"
diff --git a/conf.py b/conf.py
index a053d601d2..dfce50f509 100644
--- a/conf.py
+++ b/conf.py
@@ -61,7 +61,7 @@
project = u'Tarantool'
# |release| The full version, including alpha/beta/rc tags.
-release = "2.11.1"
+release = "3.0.0"
# |version| The short X.Y version.
version = '.'.join(release.split('.')[0:2])
@@ -73,10 +73,6 @@
'how-to/using_docker.rst',
'reference/configuration/cfg_*',
'images',
- 'book/cartridge/cartridge_overview.rst',
- 'book/cartridge/CONTRIBUTING.rst',
- 'book/cartridge/topics',
- 'book/cartridge/cartridge_api/modules/cartridge.test-helpers.rst',
'reference/reference_rock/luatest/README.rst',
'reference/reference_rock/luatest/modules/luatest.rst',
'**/_includes/*'
diff --git a/doc/alternate_build_master.rst b/doc/alternate_build_master.rst
index 6effb4edc0..002791508b 100644
--- a/doc/alternate_build_master.rst
+++ b/doc/alternate_build_master.rst
@@ -14,7 +14,6 @@
how-to/index
concepts/index
CRUD operations
- book/cartridge/index
book/admin/index
book/connectors
enterprise/index
diff --git a/doc/book/admin/admin_instances_dev.png b/doc/book/admin/admin_instances_dev.png
new file mode 100644
index 0000000000..7461ae40f0
Binary files /dev/null and b/doc/book/admin/admin_instances_dev.png differ
diff --git a/doc/book/admin/admin_instances_prod.png b/doc/book/admin/admin_instances_prod.png
new file mode 100644
index 0000000000..143cd9f832
Binary files /dev/null and b/doc/book/admin/admin_instances_prod.png differ
diff --git a/doc/book/admin/instance_config.rst b/doc/book/admin/instance_config.rst
index 2e4df21b38..35bacc21a2 100644
--- a/doc/book/admin/instance_config.rst
+++ b/doc/book/admin/instance_config.rst
@@ -1,177 +1,204 @@
.. _admin-instance_config:
+.. _admin-instance-environment-overview:
+.. _admin-tt_config_file:
-Instance configuration
-======================
+Application environment
+=======================
-For each Tarantool instance, you need two files:
+This section provides a high-level overview of how to prepare a Tarantool application for deployment
+and how the application's environment and layout might look.
+This information is helpful for understanding how to administer Tarantool instances using the :ref:`tt CLI ` in both development and production environments.
-* [Optional] An :ref:`application file ` with
- instance-specific logic. Put this file into the ``/usr/share/tarantool/``
- directory.
+The main steps of creating and preparing the application for deployment are:
- For example, ``/usr/share/tarantool/my_app.lua`` (here we implement it as a
- :ref:`Lua module ` that bootstraps the database and
- exports ``start()`` function for API calls):
+1. :ref:`admin-instance_config-init-environment`.
- .. code-block:: lua
+2. :ref:`admin-instance_config-develop-app`.
- local function start()
- box.schema.space.create("somedata")
- box.space.somedata:create_index("primary")
- <...>
- end
+3. :ref:`admin-instance_config-package-app`.
- return {
- start = start;
- }
+In this section, a `sharded_cluster `_ application is used as an example.
+This cluster includes five instances: one router and four storages, which form two replica sets.
-* An :ref:`instance file ` with
- instance-specific initialization logic and parameters. Put this file, or a
- symlink to it, into the **instance directory**
- (see ``instances_enabled`` parameter in :ref:`tt configuration file `).
+.. image:: admin_instances_dev.png
+ :align: left
+ :width: 700
+ :alt: Cluster topology
- For example, ``/etc/tarantool/instances.enabled/my_app.lua`` (here we load
- ``my_app.lua`` module and make a call to ``start()`` function from that
- module):
- .. code-block:: lua
- #!/usr/bin/env tarantool
+.. _admin-instance_config-init-environment:
+.. _admin-start_stop_instance-running_locally:
- box.cfg {
- listen = 3301;
- }
+Initializing a local environment
+--------------------------------
- -- load my_app module and call start() function
- -- with some app options controlled by sysadmins
- local m = require('my_app').start({...})
+Before creating an application, you need to set up a local environment for ``tt``:
-.. _admin-instance_file:
+1. Create a home directory for the environment.
-Instance file
--------------
+2. Run ``tt init`` in this directory:
+
+ .. code-block:: console
-After this short introduction, you may wonder what an instance file is, what it
-is for, and how ``tt`` uses it. After all, Tarantool is an application
-server, so why not start the application stored in ``/usr/share/tarantool``
-directly?
+ ~/myapp$ tt init
+ • Environment config is written to 'tt.yaml'
-A typical Tarantool application is not a script, but a daemon running in
-background mode and processing requests, usually sent to it over a TCP/IP
-socket. This daemon needs to be started automatically when the operating system
-starts, and managed with the operating system standard tools for service
-management -- such as ``systemd`` or ``init.d``. To serve this very purpose, we
-created **instance files**.
+This command creates a default ``tt`` configuration file ``tt.yaml`` for a local
+environment and the directories for applications, control sockets, logs, and other
+artifacts:
-You can have more than one instance file. For example, a single application in
-``/usr/share/tarantool`` can run in multiple instances, each of them having its
-own instance file. Or you can have multiple applications in
-``/usr/share/tarantool`` -- again, each of them having its own instance file.
+.. code-block:: console
-An instance file is typically created by a system administrator. An application
-file is often provided by a developer, in a Lua rock or an rpm/deb package.
+ ~/myapp$ ls
+ bin distfiles include instances.enabled modules templates tt.yaml
-An instance file is designed to not differ in any way from a Lua application.
-It must, however, configure the database, i.e. contain a call to
-:doc:`box.cfg{} ` somewhere in it, because it’s the
-only way to turn a Tarantool script into a background process, and
-``tt`` is a tool to manage background processes. Other than that, an
-instance file may contain arbitrary Lua code, and, in theory, even include the
-entire application business logic in it. We, however, do not recommend this,
-since it clutters the instance file and leads to unnecessary copy-paste when
-you need to run multiple instances of an application.
+Find detailed information about the ``tt`` configuration parameters and launch modes
+on the :ref:`tt configuration page `.
-.. _admin-tt-preload:
-Preloading Lua scripts and modules
-----------------------------------
-Tarantool supports loading and running chunks of Lua code before the loading instance file.
-To load or run Lua code immediately upon Tarantool startup, specify the ``TT_PRELOAD``
-environment variable. Its value can be either a path to a Lua script or a Lua module name:
+.. _admin-instance_config-develop-app:
+.. _admin-start_stop_instance-multi-instance:
+.. _admin-start_stop_instance-multi-instance-layout:
-* To run the Lua script ``script.lua`` from the ``preload/path/`` directory inside
- the working directory in Tarantool before ``main.lua``, set ``TT_PRELOAD`` as follows:
+Creating and developing an application
+--------------------------------------
- .. code-block:: console
+You can create an application in two ways:
- $ TT_PRELOAD=/preload/path/script.lua tarantool main.lua
+- Manually by preparing its layout in a directory inside ``instances_enabled``.
+ The directory name is used as the application identifier.
- Tarantool runs the ``script.lua`` code, waits for it to complete, and
- then starts running ``main.lua``.
+- From a template by using the :ref:`tt create ` command.
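+
+  For illustration, creating an application from a built-in template might look like this (a sketch; the ``vshard_cluster`` template name is an assumption, see ``tt create --help`` for the available templates):
+
+  .. code-block:: console
+
+     $ tt create vshard_cluster --name myapp
+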
-* To load the ``preload.module`` into the Tarantool Lua interpreter
- executing ``main.lua``, set ``TT_PRELOAD`` as follows:
+In this example, the application's layout is prepared manually and looks as follows.
- .. code-block:: console
+.. code-block:: console
+
+ ~/myapp$ tree
+ .
+ ├── bin
+ ├── distfiles
+ ├── include
+ ├── instances.enabled
+ │ └── sharded_cluster
+ │ ├── config.yaml
+ │ ├── instances.yaml
+ │ ├── router.lua
+ │ ├── sharded_cluster-scm-1.rockspec
+ │ └── storage.lua
+ ├── modules
+ ├── templates
+ └── tt.yaml
+
+
+The ``sharded_cluster`` directory contains the following files:
- $ TT_PRELOAD=preload.module tarantool main.lua
+- ``config.yaml``: contains the :ref:`configuration ` of the cluster. This file might include the entire cluster topology or provide connection settings to a centralized configuration storage.
+- ``instances.yaml``: specifies instances to run in the current environment. For example, on the developer’s machine, this file might include all the instances defined in the cluster configuration. In the production environment, this file includes :ref:`instances to run on a specific machine `.
+- ``router.lua``: includes code specific to a :ref:`router `.
+- ``sharded_cluster-scm-1.rockspec``: specifies the required external dependencies (for example, ``vshard``).
+- ``storage.lua``: includes code specific to :ref:`storages ` (see the sketch below).
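+
+Below is a minimal sketch of what ``storage.lua`` might contain. The ``bands`` space and its format are illustrative assumptions rather than the exact example code:
+
+.. code-block:: lua
+
+   -- storage.lua (sketch): create a space on the first start only.
+   box.once('bootstrap', function()
+       box.schema.create_space('bands', {
+           format = {
+               { name = 'id', type = 'unsigned' },
+               { name = 'bucket_id', type = 'unsigned' },
+               { name = 'band_name', type = 'string' },
+           },
+           if_not_exists = true,
+       })
+       -- A primary index over the first field (id).
+       box.space.bands:create_index('primary', { if_not_exists = true })
+   end)
+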
- Tarantool loads the ``preload.module`` code into the interpreter and
- starts running ``main.lua`` as if its first statement was ``require('preload.module')``.
+You can find the full example here:
+`sharded_cluster `_.
+
+
+
+.. _admin-instance_config-package-app:
+.. _admin-instance-app-layout:
+.. _admin-instance_file:
- .. warning::
+Packaging the application
+-------------------------
- ``TT_PRELOAD`` values that end with ``.lua`` are considered scripts,
- so avoid module names with this ending.
+When the application is ready, package it using the :ref:`tt pack ` command.
+This command can create an installable DEB/RPM package or generate a ``.tgz`` archive.
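+
+For example, packing the current environment into a ``.tgz`` archive looks like this (invocation only, output omitted):
+
+.. code-block:: console
+
+   $ tt pack tgz
+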
-To load several scripts or modules, pass them in a single quoted string, separated
-by semicolons:
+The structure below reflects the content of the packed ``.tgz`` archive for the `sharded_cluster `_ application:
.. code-block:: console
- $ TT_PRELOAD="/preload/path/script.lua;preload.module" tarantool main.lua
+ ~/myapp$ tree -a
+ .
+ ├── bin
+ │ ├── tarantool
+ │ └── tt
+ ├── include
+ ├── instances.enabled
+ │ └── sharded_cluster -> ../sharded_cluster
+ ├── modules
+ ├── sharded_cluster
+ │ ├── .rocks
+ │ │ └── share
+ │ │ └── ...
+ │ ├── config.yaml
+ │ ├── instances.yaml
+ │ ├── router.lua
+ │ ├── sharded_cluster-scm-1.rockspec
+ │ └── storage.lua
+ └── tt.yaml
-In the preload script, the three dots (``...``) value contains the module name
-if you're preloading a module or the path to the script if you're running a script.
-The :ref:`arg ` value from the main script is visible in
-the preload script or module.
+The application's layout looks similar to the one defined when :ref:`developing the application `, with some differences:
-For example, when preloading this script:
+- ``bin``: contains the ``tarantool`` and ``tt`` binaries packed with the application bundle.
-.. code-block:: lua
+- ``instances.enabled``: contains a symlink to the packed ``sharded_cluster`` application.
- -- preload.lua --
- print("Preloading:")
- print("... arg is:", ...)
- print("Passed args:", arg[1], arg[2])
+- ``sharded_cluster``: the packed application. In addition to the files created during development, it includes the ``.rocks`` directory containing application dependencies (for example, ``vshard``).
-You get the following output:
+- ``tt.yaml``: a ``tt`` configuration file.
-.. code-block:: console
- $ TT_PRELOAD=preload.lua tarantool main.lua arg1 arg2
- Preloading:
- ... arg is: preload.lua
- Passed args: arg1 arg2
- 'strip_core' is set but unsupported
- ... main/103/main.lua I> Tarantool 2.11.0-0-g247a9a4 Darwin-x86_64-Release
- ... main/103/main.lua I> log level 5
- ... main/103/main.lua I> wal/engine cleanup is paused
- < ... >
-If an error happens during the execution of the preload script or module, Tarantool
-reports the problem and exits.
+.. _admin-instances_to_run:
-.. _admin-tt_config_file:
+Instances to run
+~~~~~~~~~~~~~~~~
-tt configuration file
----------------------
+Another difference for a deployed application is the content of the ``instances.yaml`` file, which specifies the instances to run in the current environment.
-While instance files contain instance configuration, the :ref:`tt ` configuration file
-contains the configuration that ``tt`` uses to set up the application environment.
-This includes the path to instance files, various working directories, and other
-parameters that connect the application to the system.
+- On the developer's machine, this file might include all the instances defined in the cluster configuration.
-To create a default ``tt`` configuration, run ``tt init``. This creates a ``tt.yaml``
-configuration file. Its location depends on the :ref:`tt launch mode `
-(system or local).
+ .. image:: admin_instances_dev.png
+ :align: left
+ :width: 700
+ :alt: Cluster topology
-Some ``tt`` configuration parameters are similar to those used by
-:doc:`box.cfg{} `, for example, ``memxt_dir``
-or ``wal_dir``. Other parameters define the ``tt`` environment, for example,
-paths to installation files used by ``tt`` or to connected :ref:`external modules `.
+ ``instances.yaml``:
-Find the detailed information about the ``tt`` configuration parameters and launch modes
-on the :ref:`tt configuration page `.
+ .. literalinclude:: /code_snippets/snippets/sharding/instances.enabled/sharded_cluster/instances.yaml
+ :language: yaml
+ :dedent:
+
+- In the production environment, this file includes the instances to run on a specific machine.
+
+ .. image:: admin_instances_prod.png
+ :align: left
+ :width: 700
+ :alt: Cluster topology
+
+ ``instances.yaml`` (Server-001):
+
+ .. code-block:: yaml
+
+ router-a-001:
+
+ ``instances.yaml`` (Server-002):
+
+ .. code-block:: yaml
+
+ storage-a-001:
+ storage-b-001:
+
+ ``instances.yaml`` (Server-003):
+
+ .. code-block:: yaml
+
+ storage-a-002:
+ storage-b-002:
+
+
+The :ref:`Starting and stopping instances ` section describes how to start and stop Tarantool instances.
diff --git a/doc/book/admin/start_stop_instance.rst b/doc/book/admin/start_stop_instance.rst
index 759e16f784..e84ef23cdc 100644
--- a/doc/book/admin/start_stop_instance.rst
+++ b/doc/book/admin/start_stop_instance.rst
@@ -3,333 +3,387 @@
Starting and stopping instances
===============================
-To start a Tarantool instance from an :ref:`instance file `
-using the :ref:`tt ` utility:
+This section describes how to manage instances in a Tarantool cluster using the :ref:`tt ` utility.
+A cluster can include multiple instances that run different code.
+A typical example is a cluster application that includes router and storage instances.
+In particular, you can perform the following actions:
-1. Place the instance file (for example, ``my_app.lua``) into ``/etc/tarantool/instances.enabled/``.
- This is the default location where ``tt`` searches for instance files.
+* start all instances in a cluster or only specific ones
+* check the status of instances
+* connect to a specific instance
+* stop all instances or only specific ones
-2. Run ``tt start``:
+To get more context on how the application's environment might look, refer to :ref:`Application environment `.
- .. code-block:: console
+.. NOTE::
- $ tt start
- • Starting an instance [my_app]...
+ In this section, a `sharded_cluster `_ application is used to demonstrate how to start, stop, and manage instances in a cluster.
-In this case, ``tt`` starts an instance from any ``*.lua`` file it finds in ``/etc/tarantool/instances.enabled/``.
-Starting instances
-------------------
+.. _configuration_run_instance:
-All the instance files or directories placed in the ``instances_enabled`` directory
-specified in :ref:`tt configuration ` are called *enabled instances*.
-If there are several enabled instances, ``tt start`` starts a separate Tarantool
-instance for each of them.
+Starting Tarantool instances
+----------------------------
-Learn more about working with multiple Tarantool instances in
-:ref:`Multi-instance applications `.
+.. _configuration_run_instance_tt:
-To start a specific enabled instance, specify its name in the ``tt start`` argument:
+Starting instances using the tt utility
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. code-block:: console
+The :ref:`tt ` utility is the recommended way to start Tarantool instances.
- $ tt start my_app
- • Starting an instance [my_app]...
+.. code-block:: console
-When starting an instance, ``tt`` uses its :ref:`configuration file `
-``tt.yaml`` to set up a :ref:`tt environment ` in which the instance runs.
-The default ``tt`` configuration file is created automatically in ``/etc/tarantool/``.
-Learn how to set up a ``tt`` environment in a directory of your choice in
-:ref:`Running Tarantool locally `.
+ $ tt start sharded_cluster
+ • Starting an instance [sharded_cluster:storage-a-001]...
+ • Starting an instance [sharded_cluster:storage-a-002]...
+ • Starting an instance [sharded_cluster:storage-b-001]...
+ • Starting an instance [sharded_cluster:storage-b-002]...
+ • Starting an instance [sharded_cluster:router-a-001]...
-After the instance has started and worked for some time, you can find its artifacts
+After the cluster has started and worked for some time, you can find its artifacts
in the directories specified in the ``tt`` configuration. These are the default
-locations:
+locations in the local :ref:`launch mode `:
+
+* ``sharded_cluster/var/log/<instance_name>/`` -- instance :ref:`logs `.
+* ``sharded_cluster/var/lib/<instance_name>/`` -- :ref:`snapshots and write-ahead logs `.
+* ``sharded_cluster/var/run/<instance_name>/`` -- control sockets and PID files.
+
+In the system launch mode, artifacts are created in these locations:
+
+* ``/var/log/tarantool/<instance_name>/``
+* ``/var/lib/tarantool/<instance_name>/``
+* ``/var/run/tarantool/<instance_name>/``
+
+
+.. _configuration_run_instance_tarantool:
+
+Starting an instance using the tarantool command
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``tarantool`` command provides additional :ref:`options ` that might be helpful for development purposes.
+Below is the syntax for starting a Tarantool instance configured in a file:
+
+.. code-block:: console
-* ``/var/log/tarantool/.log`` -- instance :ref:`logs `.
-* ``/var/lib/tarantool//`` -- snapshots and write-ahead logs.
-* ``/var/run/tarantool/.control`` -- a control socket. This is
- a Unix socket with the Lua console attached to it. This file is used to connect
- to the instance console.
-* ``/var/run/tarantool/.pid`` -- a PID file that ``tt`` uses to
- check the instance status and send control commands.
+ $ tarantool [OPTION ...] --name INSTANCE_NAME --config CONFIG_FILE_PATH
+
+The command below starts ``router-a-001`` configured in the ``config.yaml`` file:
+
+.. code-block:: console
+
+ $ tarantool --name router-a-001 --config config.yaml
+
+
+
+.. _admin-start_stop_instance_management:
Basic instance management
-------------------------
-.. note::
+Most of the commands described in this section can be called with or without an instance name.
+Without the instance name, they are executed for all instances defined in ``instances.yaml``.
- These commands can be called without an instance name. In this case, they are
- executed for all enabled instances.
-``tt`` provides a set of commands for performing basic operations over instances:
+.. _admin-start_stop_instance_check_status:
-* ``tt check`` -- check the instance file for syntax errors:
+Checking an instance's status
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- .. code-block:: console
+To check the status of instances, execute :ref:`tt status `:
- $ tt check my_app
- • Result of check: syntax of file '/etc/tarantool/instances.enabled/my_app.lua' is OK
+.. code-block:: console
-* ``tt status`` -- check the instance status:
+ $ tt status sharded_cluster
+ INSTANCE STATUS PID
+ sharded_cluster:storage-a-001 RUNNING 2023
+ sharded_cluster:storage-a-002 RUNNING 2026
+ sharded_cluster:storage-b-001 RUNNING 2020
+ sharded_cluster:storage-b-002 RUNNING 2021
+ sharded_cluster:router-a-001 RUNNING 2022
- .. code-block:: console
+To check the status of a specific instance, you need to specify its name:
- $ tt status my_app
- INSTANCE STATUS PID
- my_app NOT RUNNING
+.. code-block:: console
-* ``tt restart`` -- restart the instance:
+ $ tt status sharded_cluster:storage-a-001
+ INSTANCE STATUS PID
+ sharded_cluster:storage-a-001 RUNNING 2023
- .. code-block:: console
- $ tt restart my_app -y
- • The Instance my_app (PID = 729) has been terminated.
- • Starting an instance [my_app]...
+.. _admin-start_stop_instance_connect:
- The ``-y`` option responds "yes" to the confirmation prompt automatically.
+Connecting to an instance
+~~~~~~~~~~~~~~~~~~~~~~~~~
-* ``tt stop`` -- stop the instance:
+To connect to an instance, use the :ref:`tt connect ` command:
- .. code-block:: console
+.. code-block:: console
- $ tt stop my_app
- • The Instance my_app (PID = 639) has been terminated.
+ $ tt connect sharded_cluster:storage-a-001
+ • Connecting to the instance...
+ • Connected to sharded_cluster:storage-a-001
-* ``tt clean`` -- remove instance artifacts: logs, snapshots, and other files.
+ sharded_cluster:storage-a-001>
- .. code-block:: console
+In the instance's console, you can execute commands provided by the :ref:`box ` module.
+For example, :ref:`box.info ` can be used to get information about a running instance:
+
+.. code-block:: console
+
+ sharded_cluster:storage-a-001> box.info.ro
+ ---
+ - false
+ ...
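+
+Similarly, ``box.info.name`` returns the instance name (a sketch; the output assumes the instance connected to above):
+
+.. code-block:: console
+
+ sharded_cluster:storage-a-001> box.info.name
+ ---
+ - storage-a-001
+ ...
+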
- $ tt clean my_app -f
- • List of files to delete:
- • /var/log/tarantool/my_app.log
- • /var/lib/tarantool/my_app/00000000000000000000.snap
- • /var/lib/tarantool/my_app/00000000000000000000.xlog
- The ``-f`` option removes the files without confirmation.
+.. _admin-start_stop_instance_restart:
-.. _admin-start_stop_instance-multi-instance:
+Restarting instances
+~~~~~~~~~~~~~~~~~~~~
-Multi-instance applications
----------------------------
+To restart an instance, use :ref:`tt restart `:
-Tarantool applications can include multiple instances that run different code.
-A typical example is a cluster application that includes router and storage
-instances. The ``tt`` utility enables managing such applications.
-With a single ``tt`` call, you can:
+.. code-block:: console
+
+ $ tt restart sharded_cluster:storage-a-002
-* start an application on multiple instances
-* check the status of application instances
-* connect to a specific instance of an application
-* stop a specific instance of an application or all its instances
+After executing ``tt restart``, you need to confirm this operation:
-Application layout
+.. code-block:: console
+
+ Confirm restart of 'sharded_cluster:storage-a-002' [y/n]: y
+ • The Instance sharded_cluster:storage-a-002 (PID = 2026) has been terminated.
+ • Starting an instance [sharded_cluster:storage-a-002]...
+
+
+.. _admin-start_stop_instance_stop:
+
+Stopping instances
~~~~~~~~~~~~~~~~~~
-To create a multi-instance application, prepare its layout
-in a directory inside ``instances_enabled``. The directory name is used as
-the application identifier.
+To stop a specific instance, use :ref:`tt stop ` as follows:
-This directory should contain the following files:
+.. code-block:: console
-* The default instance file named ``init.lua``. This file is used for all
- instances of the application unless there are specific instance files (see below).
-* The instances configuration file ``instances.yml`` with instance names followed by colons:
+ $ tt stop sharded_cluster:storage-a-002
- .. code-block:: yaml
+You can also stop all the instances at once as follows:
- :
- :
- ...
+.. code-block:: console
- .. note::
+ $ tt stop sharded_cluster
+ • The Instance sharded_cluster:storage-b-001 (PID = 2020) has been terminated.
+ • The Instance sharded_cluster:storage-b-002 (PID = 2021) has been terminated.
+ • The Instance sharded_cluster:router-a-001 (PID = 2022) has been terminated.
+ • The Instance sharded_cluster:storage-a-001 (PID = 2023) has been terminated.
+ • can't "stat" the PID file. Error: "stat /home/testuser/myapp/instances.enabled/sharded_cluster/var/run/storage-a-002/tt.pid: no such file or directory"
- Do not use the dot (``.``) and dash (``-``) characters in the instance names.
- They are reserved for system use.
+.. note::
-* (Optional) Specific instances files.
- These files should have names ``.init.lua``, where ````
- is the name specified in ``instances.yml``.
- For example, if your application has separate source files for the ``router`` and ``storage``
- instances, place the router code in the ``router.init.lua`` file.
+ The error message indicates that ``storage-a-002`` has already been stopped.
-For example, take a ``demo`` application that has three instances:``storage1``,
-``storage2``, and ``router``. Storage instances share the same code, and ``router`` has its own.
-The application directory ``demo`` inside ``instances_enabled`` must contain the following files:
-* ``instances.yml`` -- the instances configuration:
+.. _admin-start_stop_instance_remove_artifacts:
- .. code-block:: yaml
+Removing instance artifacts
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
- storage1:
- storage2:
- router:
+The :ref:`tt clean ` command removes instance artifacts (such as logs or snapshots):
-* ``init.lua`` -- the code of ``storage1`` and ``storage2``
-* ``router.init.lua`` -- the code of ``router``
+.. code-block:: console
+ $ tt clean sharded_cluster
+ • List of files to delete:
-Identifying instances in code
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ • /home/testuser/myapp/instances.enabled/sharded_cluster/var/log/storage-a-001/tt.log
+ • /home/testuser/myapp/instances.enabled/sharded_cluster/var/lib/storage-a-001/00000000000000001062.snap
+ • /home/testuser/myapp/instances.enabled/sharded_cluster/var/lib/storage-a-001/00000000000000001062.xlog
+ • ...
-When the application is working, each instance has associated environment variables
-``TARANTOOL_INSTANCE_NAME`` and ``TARANTOOL_APP_NAME``. You can use them in the application
-code to identify the instance on which the code runs.
+ Confirm [y/n]:
-To obtain the instance and application names, use the following code:
+Enter ``y`` and press ``Enter`` to confirm the removal of artifacts for each instance.
-.. code:: lua
+.. note::
- local inst_name = os.getenv('TARANTOOL_INSTANCE_NAME')
- local app_name = os.getenv('TARANTOOL_APP_NAME')
+ The ``-f`` option of the ``tt clean`` command can be used to remove the files without confirmation.
-Managing multi-instance applications
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Start all three instances of the ``demo`` application:
-.. code-block:: console
- $ tt start demo
- • Starting an instance [demo:router]...
- • Starting an instance [demo:storage1]...
- • Starting an instance [demo:storage2]...
+.. _admin-tt-preload:
-Check the status of ``demo`` instances:
+Preloading Lua scripts and modules
+----------------------------------
-.. code-block:: console
+Tarantool supports loading and running chunks of Lua code before starting instances.
+To load or run Lua code immediately upon Tarantool startup, specify the ``TT_PRELOAD``
+environment variable. Its value can be either a path to a Lua script or a Lua module name:
- $ tt status demo
- INSTANCE STATUS PID
- demo:router RUNNING 55
- demo:storage1 RUNNING 56
- demo:storage2 RUNNING 57
+* To run the Lua script ``preload_script.lua`` from the ``sharded_cluster`` directory, set ``TT_PRELOAD`` as follows:
-Check the status of a specific instance:
+ .. code-block:: console
-.. code-block:: console
+ $ TT_PRELOAD=preload_script.lua tt start sharded_cluster
- $ tt status demo:router
- INSTANCE STATUS PID
- demo:router RUNNING 55
+ Tarantool runs the ``preload_script.lua`` code, waits for it to complete, and
+ then starts instances.
-Connect to an instance:
+* To load the ``preload_module`` from the ``sharded_cluster`` directory, set ``TT_PRELOAD`` as follows:
-.. code-block:: console
+ .. code-block:: console
- $ tt connect demo:router
- • Connecting to the instance...
- • Connected to /var/run/tarantool/demo/router/router.control
+ $ TT_PRELOAD=preload_module tt start sharded_cluster
- /var/run/tarantool/demo/router/router.control>
+ .. note::
-Stop a specific instance:
+ ``TT_PRELOAD`` values that end with ``.lua`` are considered scripts,
+ so avoid module names with this ending.
-.. code-block:: console
+To load several scripts or modules, pass them in a single quoted string, separated
+by semicolons:
- $ tt stop demo:storage1
- • The Instance demo:storage1 (PID = 56) has been terminated.
+.. code-block:: console
-Stop all running instances of the ``demo`` application:
+ $ TT_PRELOAD="preload_script.lua;preload_module" tt start sharded_cluster
-.. code-block:: console
+If an error happens during the execution of the preload script or module, Tarantool
+reports the problem and exits.
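+
+A preload script is plain Lua. As a minimal sketch (the file name and contents are assumptions), it can inspect its own path through the vararg and read command-line arguments through ``arg``:
+
+.. code-block:: lua
+
+   -- preload_script.lua (sketch): runs before the instances start.
+   -- The vararg holds the script path (or the module name for a module).
+   print('Preloading:', ...)
+   -- Arguments from the command line are available via arg.
+   print('Passed args:', arg[1], arg[2])
+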
- $ tt stop demo
- • The Instance demo:router (PID = 55) has been terminated.
- • can't "stat" the PID file. Error: "stat /var/run/tarantool/demo/storage1/storage1.pid: no such file or directory"
- • The Instance demo:storage2 (PID = 57) has been terminated.
-.. note::
- The error message indicates that ``storage1`` is already not running.
-.. _admin-start_stop_instance-running_locally:
+.. _configuration_command_options:
-Running Tarantool locally
--------------------------
+tarantool command-line options
+------------------------------
-Sometimes you may need to run a Tarantool instance locally, for example, for test
-purposes. ``tt`` runs in a local environment if it finds a ``tt.yaml`` configuration
-file in the current directory or any of its enclosing directories.
+Options that can be passed when :ref:`starting a Tarantool instance `:
-To set up a local environment for ``tt``:
+.. option:: -h, --help
-1. Create a home directory for the environment.
+ Print an annotated list of all available options and exit.
-2. Run ``tt init`` in this directory:
+.. option:: --help-env-list
- .. code-block:: console
+ **Since:** :doc:`3.0.0 `.
- $ tt init
- • Environment config is written to 'tt.yaml'
+ Show a list of :ref:`environment variables ` that can be used to configure Tarantool.
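+
+ **Example**
+
+ .. code-block:: console
+
+ $ tarantool --help-env-list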
-This command creates a default ``tt`` configuration file ``tt.yaml`` for a local
-environment and the directories for instance files, control sockets, logs, and other
-artifacts:
+.. _index-tarantool_version:
-.. code-block:: console
+.. option:: -v, -V, --version
- $ ls
- bin distfiles include instances.enabled modules templates tt.yaml
+ Print the product name and version.
-To run a Tarantool instance in the local environment:
+ **Example**
-1. Place the instance file into the ``instances.enabled/`` directory inside the
- current directory.
+ .. code-block:: console
-2. Run ``tt start``:
+ $ tarantool --version
+ Tarantool Enterprise 3.0.0-beta1-2-gcbb569b4c-r607-gc64
+ Target: Linux-x86_64-RelWithDebInfo
+ ...
- .. code-block:: console
+ In this example:
- $ tt start
+ * ``3.0.0`` is the Tarantool version.
+ Tarantool follows semantic versioning, which is described in the :ref:`Tarantool release policy ` section.
-After the instance is started, you can find its artifacts in their locations inside
-the current directory:
+ * ``Target`` is the platform Tarantool is built on.
+ Platform-specific details may follow this line.
-* logs in ``var/log/``
-* snapshots and write-ahead logs in ``var/lib/``
-* control sockets and PID files in ``var/run/``
-To work with a local environment from a directory outside it, issue ``tt`` calls with
-the ``-L`` or ``--local`` argument with the path to this environment as its value:
+.. option:: -c, --config PATH
-.. code-block:: console
+ **Since:** :doc:`3.0.0 `.
- $ tt --local=/usr/tt/env/ start
+ Set a path to a :ref:`YAML configuration file `.
+ You can also configure this value using the ``TT_CONFIG`` environment variable.
-.. _admin-start_stop_instance-systemd:
+ See also: :ref:`Starting an instance using the tarantool command `
-Using systemd tools
--------------------
+.. option:: -n, --name INSTANCE
-If you start an instance using ``systemd`` tools, like this (the instance name
-is ``my_app``):
+ **Since:** :doc:`3.0.0 `.
-.. code-block:: console
+ Set the name of an instance to run.
+ You can also configure this value using the ``TT_INSTANCE_NAME`` environment variable.
- $ systemctl start tarantool@my_app
- $ ps axuf|grep my_app
- taranto+ 5350 1.3 0.3 1448872 7736 ? Ssl 20:05 0:28 tarantool my_app.lua
+ See also: :ref:`Starting an instance using the tarantool command `
-This actually calls ``tarantoolctl`` like in case of
-``tarantoolctl start my_app``.
-To enable ``my_app`` instance for auto-load during system startup, say:
+.. option:: -i
-.. code-block:: console
+ Enter an :ref:`interactive mode `.
- $ systemctl enable tarantool@my_app
+ **Example**
-To stop a running ``my_app`` instance with ``systemctl``, run:
+ .. code-block:: console
-.. code-block:: console
+ $ tarantool -i
- $ systemctl stop tarantool@my_app
-To restart a running ``my_app`` instance with ``systemctl``, run:
+.. option:: -e EXPR
-.. code-block:: console
+ Execute the 'EXPR' string. See also: `lua man page `_.
+
+ **Example**
+
+ .. code-block:: console
+
+ $ tarantool -e 'print("Hello, world!")'
+ Hello, world!
+
+.. option:: -l NAME
+
+ Require the 'NAME' library. See also: `lua man page `_.
+
+ **Example**
+
+ .. code-block:: console
+
+ $ tarantool -l luatest.coverage script.lua
+
+.. option:: -j cmd
+
+ Perform a LuaJIT control command. See also: `Command Line Options `_.
+
+ **Example**
+
+ .. code-block:: console
+
+ $ tarantool -j off app.lua
+
+.. option:: -b ...
+
+ Save or list bytecode. See also: `Command Line Options `_.
+
+ **Example**
+
+ .. code-block:: console
+
+ $ tarantool -b test.lua test.out
+
+.. option:: -d SCRIPT
+
+ Activate a debugging session for 'SCRIPT'. See also: `luadebug.lua `_.
+
+ **Example**
+
+ .. code-block:: console
+
+ $ tarantool -d app.lua
+
+
+.. option:: --
+
+ Stop handling options. See also: `lua man page `_.
+
+
+.. option:: -
- $ systemctl restart tarantool@my_app
+ Stop handling options and execute the standard input as a file. See also: `lua man page `_.
\ No newline at end of file
diff --git a/doc/book/admin/upgrades/upgrade_cluster.rst b/doc/book/admin/upgrades/upgrade_cluster.rst
index 7444cfd63a..3185e60a6d 100644
--- a/doc/book/admin/upgrades/upgrade_cluster.rst
+++ b/doc/book/admin/upgrades/upgrade_cluster.rst
@@ -98,11 +98,11 @@ Upgrading storages
Before upgrading **storage** instances:
-* Disable :doc:`Cartridge failover `: run
+* Disable Cartridge failover: run
.. code-block:: bash
- cartridge failover disable
+ tt cartridge failover disable
or use the Cartridge web interface (**Cluster** tab, **Failover: ** button).
@@ -112,8 +112,7 @@ Before upgrading **storage** instances:
tarantool> vshard.storage.rebalancer_disable()
-* Make sure that the Cartridge ``upgrade_schema`` :doc:`option `
- is ``false``.
+* Make sure that the Cartridge ``upgrade_schema`` option is ``false``.
.. include:: ../_includes/upgrade_storages.rst
@@ -130,11 +129,11 @@ Before upgrading **storage** instances:
Once you complete the steps, enable failover or rebalancer back:
-* Enable :doc:`Cartridge failover `: run
+* Enable Cartridge failover: run
.. code-block:: bash
- cartridge failover set [mode]
+ tt cartridge failover set [mode]
or use the Cartridge web interface (**Cluster** tab, **Failover: Disabled** button).
diff --git a/doc/book/cartridge/cartridge_overview.rst b/doc/book/cartridge/cartridge_overview.rst
deleted file mode 100644
index 1f7e75ca3c..0000000000
--- a/doc/book/cartridge/cartridge_overview.rst
+++ /dev/null
@@ -1,48 +0,0 @@
-.. _cartridge-overview:
-
-================================================================================
-About Tarantool Cartridge
-================================================================================
-
-Tarantool Cartridge is the recommended alternative to the
-:ref:`old-school practices ` of application development
-for Tarantool.
-
-.. _cluster-app:
-
-As a software development kit (SDK), Tarantool Cartridge provides you with
-utilities and :ref:`templates ` to help:
-
-* easily set up a development environment for your applications;
-* plug the necessary Lua modules.
-
-The resulting package can be installed and started on one or multiple servers
-as one or multiple instantiated services -- independent or organized into a
-**cluster**.
-
-.. NOTE::
-
- A Tarantool cluster is a collection of Tarantool instances acting in concert.
- While a single Tarantool instance can leverage the performance of a single server
- and is vulnerable to failure, the cluster spans multiple servers, utilizes their
- cumulative CPU power, and is fault-tolerant.
-
- To fully utilize the capabilities of a Tarantool cluster, you need to
- develop applications keeping in mind they are to run in a cluster environment.
-
-Further on, Tarantool Cartridge provides your cluster-aware applications with
-the following benefits:
-
-* horizontal scalability and load balancing via built-in automatic sharding;
-* asynchronous replication;
-* automatic failover;
-* centralized cluster control via GUI or API;
-* automatic configuration synchronization;
-* instance functionality segregation.
-
-A Tarantool Cartridge cluster can segregate functionality between instances via
-built-in and custom (user-defined) :ref:`cluster roles `.
-You can toggle instances on and off on the fly during cluster operation.
-This allows you to put different types of workloads
-(e.g., compute- and transaction-intensive ones) on different physical servers
-with dedicated hardware.
diff --git a/doc/book/cartridge/images/auth_creds.png b/doc/book/cartridge/images/auth_creds.png
deleted file mode 100644
index 04a5c14488..0000000000
Binary files a/doc/book/cartridge/images/auth_creds.png and /dev/null differ
diff --git a/doc/book/cartridge/images/bootstrap-vshard.png b/doc/book/cartridge/images/bootstrap-vshard.png
deleted file mode 100644
index 71734bc85b..0000000000
Binary files a/doc/book/cartridge/images/bootstrap-vshard.png and /dev/null differ
diff --git a/doc/book/cartridge/images/change-weight.png b/doc/book/cartridge/images/change-weight.png
deleted file mode 100644
index bdd0aa5dfb..0000000000
Binary files a/doc/book/cartridge/images/change-weight.png and /dev/null differ
diff --git a/doc/book/cartridge/images/create-router.png b/doc/book/cartridge/images/create-router.png
deleted file mode 100644
index dfb43a331b..0000000000
Binary files a/doc/book/cartridge/images/create-router.png and /dev/null differ
diff --git a/doc/book/cartridge/images/create-storage.png b/doc/book/cartridge/images/create-storage.png
deleted file mode 100644
index d0a6189e48..0000000000
Binary files a/doc/book/cartridge/images/create-storage.png and /dev/null differ
diff --git a/doc/book/cartridge/images/edit-replica-set.png b/doc/book/cartridge/images/edit-replica-set.png
deleted file mode 100644
index d4e2d105d4..0000000000
Binary files a/doc/book/cartridge/images/edit-replica-set.png and /dev/null differ
diff --git a/doc/book/cartridge/images/enabled-failover.png b/doc/book/cartridge/images/enabled-failover.png
deleted file mode 100644
index 48a3f3e298..0000000000
Binary files a/doc/book/cartridge/images/enabled-failover.png and /dev/null differ
diff --git a/doc/book/cartridge/images/expelling-instance.png b/doc/book/cartridge/images/expelling-instance.png
deleted file mode 100644
index 41d9d9a361..0000000000
Binary files a/doc/book/cartridge/images/expelling-instance.png and /dev/null differ
diff --git a/doc/book/cartridge/images/failover-control.png b/doc/book/cartridge/images/failover-control.png
deleted file mode 100644
index 6eac960da8..0000000000
Binary files a/doc/book/cartridge/images/failover-control.png and /dev/null differ
diff --git a/doc/book/cartridge/images/failover-priority.png b/doc/book/cartridge/images/failover-priority.png
deleted file mode 100644
index fa9ece3cf8..0000000000
Binary files a/doc/book/cartridge/images/failover-priority.png and /dev/null differ
diff --git a/doc/book/cartridge/images/failover.png b/doc/book/cartridge/images/failover.png
deleted file mode 100644
index c51839d432..0000000000
Binary files a/doc/book/cartridge/images/failover.png and /dev/null differ
diff --git a/doc/book/cartridge/images/final-cluster.png b/doc/book/cartridge/images/final-cluster.png
deleted file mode 100644
index 378cab79b4..0000000000
Binary files a/doc/book/cartridge/images/final-cluster.png and /dev/null differ
diff --git a/doc/book/cartridge/images/join-new-set.png b/doc/book/cartridge/images/join-new-set.png
deleted file mode 100644
index 58436cf0b0..0000000000
Binary files a/doc/book/cartridge/images/join-new-set.png and /dev/null differ
diff --git a/doc/book/cartridge/images/join-router.png b/doc/book/cartridge/images/join-router.png
deleted file mode 100644
index 651f07e791..0000000000
Binary files a/doc/book/cartridge/images/join-router.png and /dev/null differ
diff --git a/doc/book/cartridge/images/join-storage.png b/doc/book/cartridge/images/join-storage.png
deleted file mode 100644
index aa5fa0b18e..0000000000
Binary files a/doc/book/cartridge/images/join-storage.png and /dev/null differ
diff --git a/doc/book/cartridge/images/new-unconfig.png b/doc/book/cartridge/images/new-unconfig.png
deleted file mode 100644
index f148aaede9..0000000000
Binary files a/doc/book/cartridge/images/new-unconfig.png and /dev/null differ
diff --git a/doc/book/cartridge/images/probe-server.png b/doc/book/cartridge/images/probe-server.png
deleted file mode 100644
index 872287f95b..0000000000
Binary files a/doc/book/cartridge/images/probe-server.png and /dev/null differ
diff --git a/doc/book/cartridge/images/switch-master.png b/doc/book/cartridge/images/switch-master.png
deleted file mode 100644
index 59168413c1..0000000000
Binary files a/doc/book/cartridge/images/switch-master.png and /dev/null differ
diff --git a/doc/book/cartridge/images/unconfigured-router.png b/doc/book/cartridge/images/unconfigured-router.png
deleted file mode 100644
index fdec390d11..0000000000
Binary files a/doc/book/cartridge/images/unconfigured-router.png and /dev/null differ
diff --git a/doc/book/cartridge/images/users-tab.png b/doc/book/cartridge/images/users-tab.png
deleted file mode 100644
index 302601c6e0..0000000000
Binary files a/doc/book/cartridge/images/users-tab.png and /dev/null differ
diff --git a/doc/book/cartridge/images/zero-weight.png b/doc/book/cartridge/images/zero-weight.png
deleted file mode 100644
index 926318cab0..0000000000
Binary files a/doc/book/cartridge/images/zero-weight.png and /dev/null differ
diff --git a/doc/book/cartridge/images/zero.png b/doc/book/cartridge/images/zero.png
deleted file mode 100644
index 50654147e1..0000000000
Binary files a/doc/book/cartridge/images/zero.png and /dev/null differ
diff --git a/doc/book/connectors/java.rst b/doc/book/connectors/java.rst
index 6cb53ee2f4..d13784fb31 100644
--- a/doc/book/connectors/java.rst
+++ b/doc/book/connectors/java.rst
@@ -8,7 +8,7 @@ There are two Java connectors available:
* `cartridge-java `__
supports both single Tarantool nodes and clusters,
as well as applications built using the
- :doc:`Cartridge framework ` and its modules.
+ `Cartridge framework `__ and its modules.
The Tarantool team actively updates this module with the newest Tarantool features.
* `tarantool-java `__
works with early Tarantool versions (1.6 and later)
diff --git a/doc/book/index.rst b/doc/book/index.rst
index 0ad7da7169..aed53f9158 100644
--- a/doc/book/index.rst
+++ b/doc/book/index.rst
@@ -15,7 +15,6 @@ User's Guide
../how-to/index
../concepts/index
box/index
- cartridge/index
admin/index
monitoring/index
connectors
diff --git a/doc/code_snippets/README.md b/doc/code_snippets/README.md
index 23f5d9108c..3ee120f28b 100644
--- a/doc/code_snippets/README.md
+++ b/doc/code_snippets/README.md
@@ -1,25 +1,43 @@
# Tarantool code examples
-The `doc/code_snippets` folder of a Tarantool documentation repository contains runnable code examples that show how to work with various Tarantool modules. Code from these examples is [referenced](#referencing-code-snippets) in corresponding documentation sections.
+The `doc/code_snippets` folder of a Tarantool documentation repository contains runnable code examples that show how to work with Tarantool:
+
+- The [snippets](snippets) folder contains sample applications that demonstrate how to configure a Tarantool cluster.
+- The [test](test) folder contains testable Lua examples that show how to work with various Tarantool modules.
+
+Code from these examples is [referenced](#referencing-code-snippets) in corresponding documentation sections.
## Prerequisites
-First, install the [tt CLI utility](https://www.tarantool.io/en/doc/latest/reference/tooling/tt_cli/).
-Then, go to the `doc/code_snippets` folder and install the following libraries:
+- Install the [tt CLI utility](https://www.tarantool.io/en/doc/latest/reference/tooling/tt_cli/).
+- To be able to run tests for samples from [test](test), go to the `doc/code_snippets` folder and install the following libraries:
-- Install [luatest](https://github.com/tarantool/luatest):
- ```shell
- tt rocks install luatest
- ```
+ - [luatest](https://github.com/tarantool/luatest):
+ ```shell
+ tt rocks install luatest
+ ```
+
+ - [luarapidxml](https://github.com/tarantool/luarapidxml):
+ ```shell
+ tt rocks install luarapidxml
+ ```
-- Install [luarapidxml](https://github.com/tarantool/luarapidxml):
- ```shell
- tt rocks install luarapidxml
- ```
+## Running
+
+### Running applications from 'snippets'
+
+To run applications placed in [snippets](snippets), follow these steps:
+
+1. Go to the directory containing samples for a specific feature, for example, [snippets/replication](snippets/replication).
+2. To run applications placed in [instances.enabled](instances.enabled), execute the `tt start` command, for example:
+
+ ```console
+ $ tt start auto_leader
+ ```
-## Running and testing examples
+### Running and testing examples from 'test'
To test all the examples, go to the `doc/code_snippets` folder and execute the `luatest` command:
diff --git a/doc/code_snippets/snippets/config/README.md b/doc/code_snippets/snippets/config/README.md
new file mode 100644
index 0000000000..2e2586e964
--- /dev/null
+++ b/doc/code_snippets/snippets/config/README.md
@@ -0,0 +1,11 @@
+# Configuration
+
+A sample application demonstrating various features related to Tarantool [configuration](https://www.tarantool.io/en/doc/latest/concepts/configuration/).
+
+## Running
+
+To run applications placed in [instances.enabled](instances.enabled), go to the `config` directory in the terminal and execute the `tt start` command, for example:
+
+```console
+$ tt start application
+```
diff --git a/doc/code_snippets/snippets/config/instances.enabled/application/config.yaml b/doc/code_snippets/snippets/config/instances.enabled/application/config.yaml
new file mode 100644
index 0000000000..e51dddc6c4
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/application/config.yaml
@@ -0,0 +1,13 @@
+app:
+ file: 'myapp.lua'
+ cfg:
+ greeting: 'Hello'
+
+groups:
+ group001:
+ replicasets:
+ replicaset001:
+ instances:
+ instance001:
+ iproto:
+ listen: "3301"
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/config/instances.enabled/application/instances.yml b/doc/code_snippets/snippets/config/instances.enabled/application/instances.yml
new file mode 100644
index 0000000000..aa60c2fc42
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/application/instances.yml
@@ -0,0 +1 @@
+instance001:
diff --git a/doc/code_snippets/snippets/config/instances.enabled/application/myapp.lua b/doc/code_snippets/snippets/config/instances.enabled/application/myapp.lua
new file mode 100644
index 0000000000..81322b1ead
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/application/myapp.lua
@@ -0,0 +1,4 @@
+-- myapp.lua --
+local log = require('log').new("myapp")
+local config = require('config')
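+-- Log a greeting built from the app.cfg.greeting option and the instance name.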
+log.info("%s from app, %s!", config:get('app.cfg.greeting'), box.info.name)
diff --git a/doc/code_snippets/snippets/config/instances.enabled/etcd/config.yaml b/doc/code_snippets/snippets/config/instances.enabled/etcd/config.yaml
new file mode 100644
index 0000000000..e38344ebd9
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/etcd/config.yaml
@@ -0,0 +1,5 @@
+config:
+ etcd:
+ endpoints:
+ - http://localhost:2379
+ prefix: /example
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/config/instances.enabled/etcd/instances.yml b/doc/code_snippets/snippets/config/instances.enabled/etcd/instances.yml
new file mode 100644
index 0000000000..6c765b2e67
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/etcd/instances.yml
@@ -0,0 +1,3 @@
+instance001:
+instance002:
+instance003:
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/config/instances.enabled/etcd_full/config.yaml b/doc/code_snippets/snippets/config/instances.enabled/etcd_full/config.yaml
new file mode 100644
index 0000000000..479314e097
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/etcd_full/config.yaml
@@ -0,0 +1,12 @@
+config:
+ etcd:
+ endpoints:
+ - http://localhost:2379
+ prefix: /example
+ username: testuser
+ password: foobar
+ ssl:
+ ca_file: ca.crt
+ http:
+ request:
+ timeout: 3
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/config/instances.enabled/etcd_full/instances.yml b/doc/code_snippets/snippets/config/instances.enabled/etcd_full/instances.yml
new file mode 100644
index 0000000000..6c765b2e67
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/etcd_full/instances.yml
@@ -0,0 +1,3 @@
+instance001:
+instance002:
+instance003:
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/config/instances.enabled/global_scope/config.yaml b/doc/code_snippets/snippets/config/instances.enabled/global_scope/config.yaml
new file mode 100644
index 0000000000..fbb6edb7de
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/global_scope/config.yaml
@@ -0,0 +1,9 @@
+iproto:
+ listen: "3301"
+
+groups:
+ group001:
+ replicasets:
+ replicaset001:
+ instances:
+ instance001: {}
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/config/instances.enabled/global_scope/instances.yml b/doc/code_snippets/snippets/config/instances.enabled/global_scope/instances.yml
new file mode 100644
index 0000000000..aa60c2fc42
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/global_scope/instances.yml
@@ -0,0 +1 @@
+instance001:
diff --git a/doc/code_snippets/snippets/config/instances.enabled/group_scope/config.yaml b/doc/code_snippets/snippets/config/instances.enabled/group_scope/config.yaml
new file mode 100644
index 0000000000..8a226fd4db
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/group_scope/config.yaml
@@ -0,0 +1,8 @@
+groups:
+ group001:
+ iproto:
+ listen: "3301"
+ replicasets:
+ replicaset001:
+ instances:
+ instance001: {}
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/config/instances.enabled/group_scope/instances.yml b/doc/code_snippets/snippets/config/instances.enabled/group_scope/instances.yml
new file mode 100644
index 0000000000..aa60c2fc42
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/group_scope/instances.yml
@@ -0,0 +1 @@
+instance001:
diff --git a/doc/code_snippets/snippets/config/instances.enabled/instance_scope/config.yaml b/doc/code_snippets/snippets/config/instances.enabled/instance_scope/config.yaml
new file mode 100644
index 0000000000..b2ecb4cafa
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/instance_scope/config.yaml
@@ -0,0 +1,8 @@
+groups:
+ group001:
+ replicasets:
+ replicaset001:
+ instances:
+ instance001:
+ iproto:
+ listen: "3301"
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/config/instances.enabled/instance_scope/instances.yml b/doc/code_snippets/snippets/config/instances.enabled/instance_scope/instances.yml
new file mode 100644
index 0000000000..aa60c2fc42
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/instance_scope/instances.yml
@@ -0,0 +1 @@
+instance001:
diff --git a/doc/code_snippets/snippets/config/instances.enabled/replicaset_scope/config.yaml b/doc/code_snippets/snippets/config/instances.enabled/replicaset_scope/config.yaml
new file mode 100644
index 0000000000..52ecedf244
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/replicaset_scope/config.yaml
@@ -0,0 +1,8 @@
+groups:
+ group001:
+ replicasets:
+ replicaset001:
+ iproto:
+ listen: "3301"
+ instances:
+ instance001: {}
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/config/instances.enabled/replicaset_scope/instances.yml b/doc/code_snippets/snippets/config/instances.enabled/replicaset_scope/instances.yml
new file mode 100644
index 0000000000..aa60c2fc42
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/replicaset_scope/instances.yml
@@ -0,0 +1 @@
+instance001:
diff --git a/doc/code_snippets/snippets/config/instances.enabled/templating/config.yaml b/doc/code_snippets/snippets/config/instances.enabled/templating/config.yaml
new file mode 100644
index 0000000000..5df487e662
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/templating/config.yaml
@@ -0,0 +1,10 @@
+groups:
+ group001:
+ replicasets:
+ replicaset001:
+ instances:
+ instance001:
+ snapshot:
+ dir: ./var/{{ instance_name }}/snapshots
+ wal:
+ dir: ./var/{{ instance_name }}/wals
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/config/instances.enabled/templating/instances.yml b/doc/code_snippets/snippets/config/instances.enabled/templating/instances.yml
new file mode 100644
index 0000000000..aa60c2fc42
--- /dev/null
+++ b/doc/code_snippets/snippets/config/instances.enabled/templating/instances.yml
@@ -0,0 +1 @@
+instance001:
diff --git a/doc/code_snippets/snippets/config/tt.yaml b/doc/code_snippets/snippets/config/tt.yaml
new file mode 100644
index 0000000000..41a3915f50
--- /dev/null
+++ b/doc/code_snippets/snippets/config/tt.yaml
@@ -0,0 +1,54 @@
+modules:
+ # Directory where the external modules are stored.
+ directory: modules
+
+env:
+ # Restart instance on failure.
+ restart_on_failure: false
+
+ # Directory that stores binary files.
+ bin_dir: bin
+
+ # Directory that stores Tarantool header files.
+ inc_dir: include
+
+ # Path to directory that stores all applications.
+ # The directory can also contain symbolic links to applications.
+ instances_enabled: instances.enabled
+
+  # Tarantoolctl artifacts layout compatibility: if set to true, tt will not create application
+  # sub-directories for the control socket, PID files, log files, etc. Data files (wal, vinyl,
+  # snap) and multi-instance applications are not affected by this option.
+ tarantoolctl_layout: false
+
+app:
+ # Directory that stores various instance runtime
+ # artifacts like console socket, PID file, etc.
+ run_dir: var/run
+
+ # Directory that stores log files.
+ log_dir: var/log
+
+ # Directory where write-ahead log (.xlog) files are stored.
+ wal_dir: var/lib
+
+ # Directory where memtx stores snapshot (.snap) files.
+ memtx_dir: var/lib
+
+ # Directory where vinyl files or subdirectories will be stored.
+ vinyl_dir: var/lib
+
+# Path to file with credentials for downloading Tarantool Enterprise Edition.
+# credential_path: /path/to/file
+ee:
+ credential_path:
+
+templates:
+ # The path to templates search directory.
+ - path: templates
+
+repo:
+  # Directory where local rocks files can be found.
+ rocks:
+ # Directory that stores installation files.
+ distfiles: distfiles
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/README.md b/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/README.md
new file mode 100644
index 0000000000..c8256c2a5c
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/README.md
@@ -0,0 +1,11 @@
+# Master-replica: automated failover
+
+A sample application demonstrating how to bootstrap a replica set with [automated failover](https://www.tarantool.io/en/doc/latest/how-to/replication/repl_bootstrap_auto/).
+
+## Running
+
+To start all instances, execute the following command in the [replication](../../../replication) directory:
+
+```console
+$ tt start auto_leader
+```
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/config.yaml b/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/config.yaml
new file mode 100644
index 0000000000..d5b42eed83
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/config.yaml
@@ -0,0 +1,31 @@
+credentials:
+ users:
+ replicator:
+ password: 'topsecret'
+ roles: [replication]
+
+iproto:
+ advertise:
+ peer: replicator@
+
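+# Automated leader election (Raft-based) handles failover in this replica set.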
+replication:
+ failover: election
+
+groups:
+ group001:
+ replicasets:
+ replicaset001:
+ instances:
+ instance001:
+ iproto:
+ listen: 127.0.0.1:3301
+ instance002:
+ iproto:
+ listen: 127.0.0.1:3302
+ instance003:
+ iproto:
+ listen: 127.0.0.1:3303
+
+# Load sample data
+app:
+ file: 'data.lua'
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/data.lua b/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/data.lua
new file mode 100644
index 0000000000..27b0b6bbf6
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/data.lua
@@ -0,0 +1,32 @@
+function create_space()
+ box.schema.space.create('bands')
+ box.space.bands:format({
+ { name = 'id', type = 'unsigned' },
+ { name = 'band_name', type = 'string' },
+ { name = 'year', type = 'unsigned' }
+ })
+ box.space.bands:create_index('primary', { parts = { 'id' } })
+end
+
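+-- Same as create_space(), but the space is synchronous:
+-- transactions are committed only after being confirmed by a quorum of replicas.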
+function create_sync_space()
+ box.schema.space.create('bands', { is_sync = true })
+ box.space.bands:format({
+ { name = 'id', type = 'unsigned' },
+ { name = 'band_name', type = 'string' },
+ { name = 'year', type = 'unsigned' }
+ })
+ box.space.bands:create_index('primary', { parts = { 'id' } })
+end
+
+function load_data()
+ box.space.bands:insert { 1, 'Roxette', 1986 }
+ box.space.bands:insert { 2, 'Scorpions', 1965 }
+ box.space.bands:insert { 3, 'Ace of Base', 1987 }
+ box.space.bands:insert { 4, 'The Beatles', 1960 }
+ box.space.bands:insert { 5, 'Pink Floyd', 1965 }
+ box.space.bands:insert { 6, 'The Rolling Stones', 1962 }
+ box.space.bands:insert { 7, 'The Doors', 1965 }
+ box.space.bands:insert { 8, 'Nirvana', 1987 }
+ box.space.bands:insert { 9, 'Led Zeppelin', 1968 }
+ box.space.bands:insert { 10, 'Queen', 1970 }
+end
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/instances.yml b/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/instances.yml
new file mode 100644
index 0000000000..6c765b2e67
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/auto_leader/instances.yml
@@ -0,0 +1,3 @@
+instance001:
+instance002:
+instance003:
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/bootstrap_strategy/README.md b/doc/code_snippets/snippets/replication/instances.enabled/bootstrap_strategy/README.md
new file mode 100644
index 0000000000..f7e99e1308
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/bootstrap_strategy/README.md
@@ -0,0 +1,11 @@
+# replication.bootstrap_strategy
+
+A sample application demonstrating how to use the specified instance to bootstrap a replica set.
+
+## Running
+
+To start all instances, execute the following command in the [replication](../../../replication) directory:
+
+```console
+$ tt start bootstrap_strategy
+```
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/bootstrap_strategy/config.yaml b/doc/code_snippets/snippets/replication/instances.enabled/bootstrap_strategy/config.yaml
new file mode 100644
index 0000000000..c6bdfda820
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/bootstrap_strategy/config.yaml
@@ -0,0 +1,30 @@
+credentials:
+ users:
+ replicator:
+ password: 'topsecret'
+ roles: [replication]
+
+iproto:
+ advertise:
+ peer: replicator@
+
+replication:
+ failover: election
+
+groups:
+ group001:
+ replicasets:
+ replicaset001:
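+        # Bootstrap this replica set from the instance specified in bootstrap_leader.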
+ replication:
+ bootstrap_strategy: config
+ bootstrap_leader: instance001
+ instances:
+ instance001:
+ iproto:
+ listen: 127.0.0.1:3301
+ instance002:
+ iproto:
+ listen: 127.0.0.1:3302
+ instance003:
+ iproto:
+ listen: 127.0.0.1:3303
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/bootstrap_strategy/instances.yml b/doc/code_snippets/snippets/replication/instances.enabled/bootstrap_strategy/instances.yml
new file mode 100644
index 0000000000..6c765b2e67
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/bootstrap_strategy/instances.yml
@@ -0,0 +1,3 @@
+instance001:
+instance002:
+instance003:
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/README.md b/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/README.md
new file mode 100644
index 0000000000..e6724f65ee
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/README.md
@@ -0,0 +1,11 @@
+# Master-replica: manual failover
+
+A sample application demonstrating how to bootstrap a replica set with [manual failover](https://www.tarantool.io/en/doc/latest/how-to/replication/repl_bootstrap/).
+
+## Running
+
+To start all instances, execute the following command in the [replication](../../../replication) directory:
+
+```console
+$ tt start manual_leader
+```
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/config.yaml b/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/config.yaml
new file mode 100644
index 0000000000..3b30a4b6cb
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/config.yaml
@@ -0,0 +1,32 @@
+credentials:
+ users:
+ replicator:
+ password: 'topsecret'
+ roles: [replication]
+
+iproto:
+ advertise:
+ peer: replicator@
+
+replication:
+ failover: manual
+
+groups:
+ group001:
+ replicasets:
+ replicaset001:
+ leader: instance001
+ instances:
+ instance001:
+ iproto:
+ listen: 127.0.0.1:3301
+ instance002:
+ iproto:
+ listen: 127.0.0.1:3302
+ instance003:
+ iproto:
+ listen: 127.0.0.1:3303
+
+# Load sample data
+app:
+ file: 'myapp.lua'
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/instances.yml b/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/instances.yml
new file mode 100644
index 0000000000..6c765b2e67
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/instances.yml
@@ -0,0 +1,3 @@
+instance001:
+instance002:
+instance003:
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/myapp.lua b/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/myapp.lua
new file mode 100644
index 0000000000..321db0aab1
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/manual_leader/myapp.lua
@@ -0,0 +1,22 @@
+function create_space()
+ box.schema.space.create('bands')
+ box.space.bands:format({
+ { name = 'id', type = 'unsigned' },
+ { name = 'band_name', type = 'string' },
+ { name = 'year', type = 'unsigned' }
+ })
+ box.space.bands:create_index('primary', { parts = { 'id' } })
+end
+
+function load_data()
+ box.space.bands:insert { 1, 'Roxette', 1986 }
+ box.space.bands:insert { 2, 'Scorpions', 1965 }
+ box.space.bands:insert { 3, 'Ace of Base', 1987 }
+ box.space.bands:insert { 4, 'The Beatles', 1960 }
+ box.space.bands:insert { 5, 'Pink Floyd', 1965 }
+ box.space.bands:insert { 6, 'The Rolling Stones', 1962 }
+ box.space.bands:insert { 7, 'The Doors', 1965 }
+ box.space.bands:insert { 8, 'Nirvana', 1987 }
+ box.space.bands:insert { 9, 'Led Zeppelin', 1968 }
+ box.space.bands:insert { 10, 'Queen', 1970 }
+end
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/master_master/README.md b/doc/code_snippets/snippets/replication/instances.enabled/master_master/README.md
new file mode 100644
index 0000000000..ff0d938a0e
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/master_master/README.md
@@ -0,0 +1,11 @@
+# Master-master
+
+A sample application demonstrating how to bootstrap a [master-master](https://www.tarantool.io/en/doc/latest/how-to/replication/repl_bootstrap_master_master/) replica set.
+
+## Running
+
+To start all instances, execute the following command in the [replication](../../../replication) directory:
+
+```console
+$ tt start master_master
+```
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/master_master/config.yaml b/doc/code_snippets/snippets/replication/instances.enabled/master_master/config.yaml
new file mode 100644
index 0000000000..7eb2da12eb
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/master_master/config.yaml
@@ -0,0 +1,32 @@
+credentials:
+ users:
+ replicator:
+ password: 'topsecret'
+ roles: [replication]
+
+iproto:
+ advertise:
+ peer: replicator@
+
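+# Failover is disabled; writable instances are set explicitly below via database.mode.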
+replication:
+ failover: off
+
+groups:
+ group001:
+ replicasets:
+ replicaset001:
+ instances:
+ instance001:
+ database:
+ mode: rw
+ iproto:
+ listen: 127.0.0.1:3301
+ instance002:
+ database:
+ mode: rw
+ iproto:
+ listen: 127.0.0.1:3302
+
+# Load sample data
+app:
+ file: 'myapp.lua'
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/master_master/instances.yml b/doc/code_snippets/snippets/replication/instances.enabled/master_master/instances.yml
new file mode 100644
index 0000000000..75e286d69c
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/master_master/instances.yml
@@ -0,0 +1,2 @@
+instance001:
+instance002:
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/master_master/myapp.lua b/doc/code_snippets/snippets/replication/instances.enabled/master_master/myapp.lua
new file mode 100644
index 0000000000..321db0aab1
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/master_master/myapp.lua
@@ -0,0 +1,22 @@
+function create_space()
+ box.schema.space.create('bands')
+ box.space.bands:format({
+ { name = 'id', type = 'unsigned' },
+ { name = 'band_name', type = 'string' },
+ { name = 'year', type = 'unsigned' }
+ })
+ box.space.bands:create_index('primary', { parts = { 'id' } })
+end
+
+function load_data()
+ box.space.bands:insert { 1, 'Roxette', 1986 }
+ box.space.bands:insert { 2, 'Scorpions', 1965 }
+ box.space.bands:insert { 3, 'Ace of Base', 1987 }
+ box.space.bands:insert { 4, 'The Beatles', 1960 }
+ box.space.bands:insert { 5, 'Pink Floyd', 1965 }
+ box.space.bands:insert { 6, 'The Rolling Stones', 1962 }
+ box.space.bands:insert { 7, 'The Doors', 1965 }
+ box.space.bands:insert { 8, 'Nirvana', 1987 }
+ box.space.bands:insert { 9, 'Led Zeppelin', 1968 }
+ box.space.bands:insert { 10, 'Queen', 1970 }
+end
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/peers/config.yaml b/doc/code_snippets/snippets/replication/instances.enabled/peers/config.yaml
new file mode 100644
index 0000000000..ac3ed89768
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/peers/config.yaml
@@ -0,0 +1,27 @@
+credentials:
+ users:
+ replicator:
+ password: 'topsecret'
+ roles: [replication]
+
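+# replication.peers explicitly lists the URIs used to connect to the replica set members.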
+replication:
+ peers:
+ - replicator:topsecret@127.0.0.1:3301
+ - replicator:topsecret@127.0.0.1:3302
+ - replicator:topsecret@127.0.0.1:3303
+ failover: election
+
+groups:
+ group001:
+ replicasets:
+ replicaset001:
+ instances:
+ instance001:
+ iproto:
+ listen: 127.0.0.1:3301
+ instance002:
+ iproto:
+ listen: 127.0.0.1:3302
+ instance003:
+ iproto:
+ listen: 127.0.0.1:3303
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/replication/instances.enabled/peers/instances.yml b/doc/code_snippets/snippets/replication/instances.enabled/peers/instances.yml
new file mode 100644
index 0000000000..6c765b2e67
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/instances.enabled/peers/instances.yml
@@ -0,0 +1,3 @@
+instance001:
+instance002:
+instance003:
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/replication/tt.yaml b/doc/code_snippets/snippets/replication/tt.yaml
new file mode 100644
index 0000000000..41a3915f50
--- /dev/null
+++ b/doc/code_snippets/snippets/replication/tt.yaml
@@ -0,0 +1,54 @@
+modules:
+ # Directory where the external modules are stored.
+ directory: modules
+
+env:
+ # Restart instance on failure.
+ restart_on_failure: false
+
+ # Directory that stores binary files.
+ bin_dir: bin
+
+ # Directory that stores Tarantool header files.
+ inc_dir: include
+
+ # Path to directory that stores all applications.
+ # The directory can also contain symbolic links to applications.
+ instances_enabled: instances.enabled
+
+  # Tarantoolctl artifacts layout compatibility: if set to true, tt will not create application
+  # sub-directories for the control socket, PID files, log files, etc. Data files (wal, vinyl,
+  # snap) and multi-instance applications are not affected by this option.
+ tarantoolctl_layout: false
+
+app:
+ # Directory that stores various instance runtime
+ # artifacts like console socket, PID file, etc.
+ run_dir: var/run
+
+ # Directory that stores log files.
+ log_dir: var/log
+
+ # Directory where write-ahead log (.xlog) files are stored.
+ wal_dir: var/lib
+
+ # Directory where memtx stores snapshot (.snap) files.
+ memtx_dir: var/lib
+
+ # Directory where vinyl files or subdirectories will be stored.
+ vinyl_dir: var/lib
+
+# Path to file with credentials for downloading Tarantool Enterprise Edition.
+# credential_path: /path/to/file
+ee:
+ credential_path:
+
+templates:
+ # The path to templates search directory.
+ - path: templates
+
+repo:
+  # Directory where local rocks files can be found.
+ rocks:
+ # Directory that stores installation files.
+ distfiles: distfiles
diff --git a/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/README.md b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/README.md
new file mode 100644
index 0000000000..f3873f099b
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/README.md
@@ -0,0 +1,72 @@
+# Sharded cluster
+
+A sample application demonstrating how to configure a [sharded](https://www.tarantool.io/en/doc/latest/concepts/sharding/) cluster.
+
+## Running
+
+To run the cluster, go to the `sharding` directory in the terminal and perform the following steps:
+
+1. Install `vshard`:
+
+ ```console
+ $ tt rocks install vshard
+ ```
+
+2. Run the cluster:
+
+ ```console
+ $ tt start sharded_cluster
+ ```
+
+3. Connect to the router:
+
+ ```console
+ $ tt connect sharded_cluster:router-a-001
+ ```
+
+4. Insert test data:
+
+ ```console
+ sharded_cluster:router-a-001> insert_data()
+ ---
+ ...
+ ```
+
+5. Connect to storages in different replica sets to see how data is distributed across nodes:
+
+ a. `storage-a-001`:
+
+ ```console
+ sharded_cluster:storage-a-001> box.space.bands:select()
+ ---
+ - - [1, 614, 'Roxette', 1986]
+ - [2, 986, 'Scorpions', 1965]
+ - [5, 755, 'Pink Floyd', 1965]
+ - [7, 998, 'The Doors', 1965]
+ - [8, 762, 'Nirvana', 1987]
+ ...
+ ```
+
+ b. `storage-b-001`:
+
+ ```console
+ sharded_cluster:storage-b-001> box.space.bands:select()
+ ---
+ - - [3, 11, 'Ace of Base', 1987]
+ - [4, 42, 'The Beatles', 1960]
+ - [6, 55, 'The Rolling Stones', 1962]
+ - [9, 299, 'Led Zeppelin', 1968]
+ - [10, 167, 'Queen', 1970]
+ ...
+ ```
+
+
+## Packaging
+
+To package an application into a `.tgz` archive, use the `tt pack` command:
+
+```console
+$ tt pack tgz --app-list sharded_cluster
+```
+
+Note that the necessary `vshard` dependency is specified in the [sharded_cluster-scm-1.rockspec](sharded_cluster-scm-1.rockspec) file.
diff --git a/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/config.yaml b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/config.yaml
new file mode 100644
index 0000000000..3615996871
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/config.yaml
@@ -0,0 +1,55 @@
+credentials:
+ users:
+ replicator:
+ password: 'topsecret'
+ roles: [replication]
+ storage:
+ password: 'secret'
+ roles: [super]
+
+iproto:
+ advertise:
+ peer: replicator@
+ sharding: storage@
+
+sharding:
+ bucket_count: 1000
+
+groups:
+ storages:
+ app:
+ module: storage
+ sharding:
+ roles: [storage]
+ replication:
+ failover: manual
+ replicasets:
+ storage-a:
+ leader: storage-a-001
+ instances:
+ storage-a-001:
+ iproto:
+ listen: 127.0.0.1:3301
+ storage-a-002:
+ iproto:
+ listen: 127.0.0.1:3302
+ storage-b:
+ leader: storage-b-001
+ instances:
+ storage-b-001:
+ iproto:
+ listen: 127.0.0.1:3303
+ storage-b-002:
+ iproto:
+ listen: 127.0.0.1:3304
+ routers:
+ app:
+ module: router
+ sharding:
+ roles: [router]
+ replicasets:
+ router-a:
+ instances:
+ router-a-001:
+ iproto:
+ listen: 127.0.0.1:3300
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/instances.yaml b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/instances.yaml
new file mode 100644
index 0000000000..368bc16cb6
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/instances.yaml
@@ -0,0 +1,5 @@
+storage-a-001:
+storage-a-002:
+storage-b-001:
+storage-b-002:
+router-a-001:
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/router.lua b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/router.lua
new file mode 100644
index 0000000000..bc4e849af5
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/router.lua
@@ -0,0 +1,26 @@
+local vshard = require('vshard')
+
+vshard.router.bootstrap()
+
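+-- Compute the bucket id from the sharding key and forward
+-- the write to the master storage that owns this bucket.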
+function put(id, band_name, year)
+ local bucket_id = vshard.router.bucket_id_mpcrc32({ id })
+ vshard.router.callrw(bucket_id, 'put', { id, bucket_id, band_name, year })
+end
+
+function get(id)
+ local bucket_id = vshard.router.bucket_id_mpcrc32({ id })
+ return vshard.router.callro(bucket_id, 'get', { id })
+end
+
+function insert_data()
+ put(1, 'Roxette', 1986)
+ put(2, 'Scorpions', 1965)
+ put(3, 'Ace of Base', 1987)
+ put(4, 'The Beatles', 1960)
+ put(5, 'Pink Floyd', 1965)
+ put(6, 'The Rolling Stones', 1962)
+ put(7, 'The Doors', 1965)
+ put(8, 'Nirvana', 1987)
+ put(9, 'Led Zeppelin', 1968)
+ put(10, 'Queen', 1970)
+end
diff --git a/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/sharded_cluster-scm-1.rockspec b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/sharded_cluster-scm-1.rockspec
new file mode 100644
index 0000000000..cc9d8ca85b
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/sharded_cluster-scm-1.rockspec
@@ -0,0 +1,12 @@
+package = 'sharded_cluster'
+version = 'scm-1'
+source = {
+ url = '/dev/null',
+}
+
+dependencies = {
+ 'vshard == 0.1.25'
+}
+build = {
+ type = 'none';
+}
diff --git a/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/storage.lua b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/storage.lua
new file mode 100644
index 0000000000..fb9a932349
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/instances.enabled/sharded_cluster/storage.lua
@@ -0,0 +1,23 @@
+box.schema.create_space('bands', {
+ format = {
+ { name = 'id', type = 'unsigned' },
+ { name = 'bucket_id', type = 'unsigned' },
+ { name = 'band_name', type = 'string' },
+ { name = 'year', type = 'unsigned' }
+ },
+ if_not_exists = true
+})
+box.space.bands:create_index('id', { parts = { 'id' }, if_not_exists = true })
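+-- vshard requires each sharded space to have an index on the bucket_id field: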
+box.space.bands:create_index('bucket_id', { parts = { 'bucket_id' }, unique = false, if_not_exists = true })
+
+function put(id, bucket_id, band_name, year)
+ box.space.bands:insert({ id, bucket_id, band_name, year })
+end
+
+function get(id)
+ local tuple = box.space.bands:get(id)
+ if tuple == nil then
+ return nil
+ end
+ return { tuple.id, tuple.band_name, tuple.year }
+end
diff --git a/doc/code_snippets/snippets/sharding/templates/basic/MANIFEST.yaml b/doc/code_snippets/snippets/sharding/templates/basic/MANIFEST.yaml
new file mode 100644
index 0000000000..10f9d4792f
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/templates/basic/MANIFEST.yaml
@@ -0,0 +1,30 @@
+description: Basic template
+vars:
+ - prompt: A name of the user for replication
+ name: replicator_user_name
+ default: replicator
+
+ - prompt: A password for a replicator user
+ name: replicator_user_password
+ re: ^\w+$
+
+ - prompt: A name of the user for sharding
+ name: sharding_user_name
+ default: storage
+
+ - prompt: A password for a sharding user
+ name: sharding_user_password
+ re: ^\w+$
+
+ - prompt: The number of buckets in a cluster
+ name: sharding_bucket_count
+ default: '1000'
+
+ - prompt: A listen URI
+ name: listen_uri
+ default: '127.0.0.1'
+include:
+ - config.yaml
+ - instances.yaml
+ - router.lua
+ - storage.lua
diff --git a/doc/code_snippets/snippets/sharding/templates/basic/config.yaml.tt.template b/doc/code_snippets/snippets/sharding/templates/basic/config.yaml.tt.template
new file mode 100644
index 0000000000..12a5043ea9
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/templates/basic/config.yaml.tt.template
@@ -0,0 +1,55 @@
+credentials:
+ users:
+ {{.replicator_user_name}}:
+ password: '{{.replicator_user_password}}'
+ roles: [replication]
+ {{.sharding_user_name}}:
+ password: '{{.sharding_user_password}}'
+ roles: [super]
+
+iproto:
+ advertise:
+ peer: {{.replicator_user_name}}@
+ sharding: {{.sharding_user_name}}@
+
+sharding:
+ bucket_count: {{.sharding_bucket_count}}
+
+groups:
+ storages:
+ app:
+ module: storage
+ sharding:
+ roles: [storage]
+ replication:
+ failover: manual
+ replicasets:
+ storage-a:
+ leader: storage-a-001
+ instances:
+ storage-a-001:
+ iproto:
+ listen: {{.listen_uri}}:3301
+ storage-a-002:
+ iproto:
+ listen: {{.listen_uri}}:3302
+ storage-b:
+ leader: storage-b-001
+ instances:
+ storage-b-001:
+ iproto:
+ listen: {{.listen_uri}}:3303
+ storage-b-002:
+ iproto:
+ listen: {{.listen_uri}}:3304
+ routers:
+ app:
+ module: router
+ sharding:
+ roles: [router]
+ replicasets:
+ router-a:
+ instances:
+ router-a-001:
+ iproto:
+ listen: {{.listen_uri}}:3300
diff --git a/doc/code_snippets/snippets/sharding/templates/basic/instances.yaml b/doc/code_snippets/snippets/sharding/templates/basic/instances.yaml
new file mode 100644
index 0000000000..368bc16cb6
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/templates/basic/instances.yaml
@@ -0,0 +1,5 @@
+storage-a-001:
+storage-a-002:
+storage-b-001:
+storage-b-002:
+router-a-001:
\ No newline at end of file
diff --git a/doc/code_snippets/snippets/sharding/templates/basic/router.lua b/doc/code_snippets/snippets/sharding/templates/basic/router.lua
new file mode 100644
index 0000000000..ee21da0e5f
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/templates/basic/router.lua
@@ -0,0 +1,5 @@
+local vshard = require('vshard')
+
+vshard.router.bootstrap()
+
+-- Router code --
diff --git a/doc/code_snippets/snippets/sharding/templates/basic/storage.lua b/doc/code_snippets/snippets/sharding/templates/basic/storage.lua
new file mode 100644
index 0000000000..87f063f033
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/templates/basic/storage.lua
@@ -0,0 +1 @@
+-- Storage code --
diff --git a/doc/code_snippets/snippets/sharding/tt.yaml b/doc/code_snippets/snippets/sharding/tt.yaml
new file mode 100644
index 0000000000..41a3915f50
--- /dev/null
+++ b/doc/code_snippets/snippets/sharding/tt.yaml
@@ -0,0 +1,54 @@
+modules:
+ # Directory where the external modules are stored.
+ directory: modules
+
+env:
+ # Restart instance on failure.
+ restart_on_failure: false
+
+ # Directory that stores binary files.
+ bin_dir: bin
+
+ # Directory that stores Tarantool header files.
+ inc_dir: include
+
+ # Path to directory that stores all applications.
+ # The directory can also contain symbolic links to applications.
+ instances_enabled: instances.enabled
+
+  # Tarantoolctl artifacts layout compatibility: if set to true, tt will not create application
+  # sub-directories for the control socket, PID files, log files, etc. Data files (wal, vinyl,
+  # snap) and multi-instance applications are not affected by this option.
+ tarantoolctl_layout: false
+
+app:
+ # Directory that stores various instance runtime
+ # artifacts like console socket, PID file, etc.
+ run_dir: var/run
+
+ # Directory that stores log files.
+ log_dir: var/log
+
+ # Directory where write-ahead log (.xlog) files are stored.
+ wal_dir: var/lib
+
+ # Directory where memtx stores snapshot (.snap) files.
+ memtx_dir: var/lib
+
+ # Directory where vinyl files or subdirectories will be stored.
+ vinyl_dir: var/lib
+
+# Path to file with credentials for downloading Tarantool Enterprise Edition.
+# credential_path: /path/to/file
+ee:
+ credential_path:
+
+templates:
+ # The path to templates search directory.
+ - path: templates
+
+repo:
+  # Directory where local rocks files can be found.
+ rocks:
+ # Directory that stores installation files.
+ distfiles: distfiles
diff --git a/doc/concepts/configuration.rst b/doc/concepts/configuration.rst
new file mode 100644
index 0000000000..e235c58710
--- /dev/null
+++ b/doc/concepts/configuration.rst
@@ -0,0 +1,478 @@
+.. _configuration:
+
+Configuration
+=============
+
+Tarantool provides the ability to configure the full topology of a cluster and to set parameters specific to particular instances, such as connection settings, memory used to store data, logging, and snapshot settings.
+Each instance uses this configuration during :ref:`startup ` to organize the cluster.
+
+There are two approaches to configuring Tarantool:
+
+* *Since version 3.0*: In the YAML format.
+
+ YAML configuration allows you to provide the full cluster topology and specify all configuration options.
+ You can use local configuration in a YAML file for each instance or store configuration data in one reliable place using :ref:`etcd `.
+
+* *In version 2.11 and earlier*: :ref:`In code <configuration_code>` using the ``box.cfg`` API.
+
+ In this case, configuration is provided in a Lua initialization script.
+
+ .. NOTE::
+
+      Starting with version 3.0, configuring Tarantool in code is considered a legacy approach.
+
+
+.. _configuration_overview:
+
+Configuration overview
+----------------------
+
+YAML configuration describes the full topology of a Tarantool cluster.
+A cluster's topology includes the following elements, starting from the lowest level:
+
+.. code-block:: yaml
+ :emphasize-lines: 1,3,5
+
+ groups:
+ group001:
+ replicasets:
+ replicaset001:
+ instances:
+ instance001:
+ # ...
+ instance002:
+ # ...
+
+- ``instances``
+
+ An *instance* represents a single running Tarantool instance.
+ It stores data or might act as a router for handling CRUD requests in a :ref:`sharded ` cluster.
+- ``replicasets``
+
+  A *replica set* is a collection of instances that operate on the same data set.
+ :ref:`Replication ` provides redundancy and increases data availability.
+- ``groups``
+
+ A *group* provides the ability to organize replica sets.
+ For example, in a sharded cluster, one group can contain :ref:`storage ` instances and another group can contain :ref:`routers ` used to handle CRUD requests.
+
+You can flexibly configure a cluster's settings on different levels: from global settings applied to all groups down to parameters specific to individual instances.
+
+
+.. _configuration_file:
+
+Configuration in a file
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This section provides an overview of how to configure Tarantool in a YAML file.
+
+.. _configuration_instance_basic:
+
+Basic instance configuration
+****************************
+
+The example below shows a sample configuration of a single Tarantool instance:
+
+.. literalinclude:: /code_snippets/snippets/config/instances.enabled/instance_scope/config.yaml
+ :language: yaml
+ :dedent:
+
+- The ``instances`` section includes only one instance named *instance001*.
+ The ``iproto.listen`` option sets a port used to listen for incoming requests.
+- The ``replicasets`` section contains one replica set named *replicaset001*.
+- The ``groups`` section contains one group named *group001*.
+
+
+.. _configuration_scopes:
+
+Configuration scopes
+********************
+
+This section shows how to control the scope that a configuration option is applied to.
+Most configuration options can be applied to a specific instance, replica set, group, or to all instances globally.
+
+- *Instance*
+
+  To apply configuration options to a particular instance,
+  specify these options for this instance only.
+  In the example below, ``iproto.listen`` is applied to *instance001* only.
+
+ .. literalinclude:: /code_snippets/snippets/config/instances.enabled/instance_scope/config.yaml
+ :language: yaml
+ :emphasize-lines: 7-8
+ :dedent:
+
+- *Replica set*
+
+ In this example, ``iproto.listen`` is in effect for all instances in *replicaset001*.
+
+ .. literalinclude:: /code_snippets/snippets/config/instances.enabled/replicaset_scope/config.yaml
+ :language: yaml
+ :emphasize-lines: 5-6
+ :dedent:
+
+- *Group*
+
+ In this example, ``iproto.listen`` is in effect for all instances in *group001*.
+
+ .. literalinclude:: /code_snippets/snippets/config/instances.enabled/group_scope/config.yaml
+ :language: yaml
+ :emphasize-lines: 3-4
+ :dedent:
+
+- *Global*
+
+ In this example, ``iproto.listen`` is applied to all instances of the cluster.
+
+ .. literalinclude:: /code_snippets/snippets/config/instances.enabled/global_scope/config.yaml
+ :language: yaml
+ :emphasize-lines: 1-2
+ :dedent:
+
+
+.. NOTE::
+
+ The :ref:`Configuration reference ` contains information about scopes to which each configuration option can be applied.
+
+
+.. _configuration_replica_set_scopes:
+
+Configuration scopes: Replica set example
+*****************************************
+
+The example below shows how specific configuration options work in different configuration scopes for a replica set with manual failover.
+You can learn more about configuring replication from :ref:`Replication tutorials `.
+
+.. literalinclude:: /code_snippets/snippets/replication/instances.enabled/manual_leader/config.yaml
+ :language: yaml
+ :end-before: Load sample data
+ :dedent:
+
+- ``credentials`` (*global*)
+
+ This section is used to create the *replicator* user and assign it the specified role.
+ These options are applied globally to all instances.
+
+- ``iproto`` (*global*, *instance*)
+
+ The ``iproto`` section is specified on both global and instance levels.
+ The ``iproto.advertise.peer`` option specifies a URI used by an instance to connect to another instance as a replica.
+  In the example above, the URI includes a user name only.
+  The host value is taken from ``iproto.listen``, which is set at the instance level.
+
+- ``replication`` (*global*)
+
+  The ``replication.failover`` global option sets the failover mode to ``manual`` for all replica sets.
+
+- ``leader`` (*replica set*)
+
+  The ``leader`` option sets a :ref:`master ` instance for *replicaset001*.
+
+
+
+.. _configuration_application:
+
+Loading an application
+**********************
+
+Using Tarantool as an application server, you can run your own Lua applications.
+In the ``app`` section, you can load the application and provide a custom application configuration in the ``cfg`` section.
+
+In the example below, the application is loaded from the ``myapp.lua`` file placed next to the YAML configuration file:
+
+.. literalinclude:: /code_snippets/snippets/config/instances.enabled/application/config.yaml
+ :language: yaml
+ :dedent:
+
+To get a value of the custom ``greeting`` property in the application code,
+use the ``config:get()`` function provided by the :ref:`config ` module.
+
+.. literalinclude:: /code_snippets/snippets/config/instances.enabled/application/myapp.lua
+ :language: lua
+ :dedent:
+
+After :ref:`starting ` *instance001*, the log should contain the following line:
+
+.. code-block:: console
+
+ main/103/interactive/myapp I> Hello from app, instance001!
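+
+You can also read the same property from the instance's console (a sketch, assuming the custom property is defined in the ``app.cfg`` section as shown above):
+
+.. code-block:: lua
+
+    -- Read the custom property provided in the app.cfg section.
+    local config = require('config')
+    config:get('app.cfg.greeting') -- returns the configured value, for example, 'Hello'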
+
+The ``app`` section can be placed in any :ref:`configuration scope <configuration_scopes>`.
+As an example use case, you can provide different applications for storages and routers in a sharded cluster:
+
+.. code-block:: yaml
+
+ groups:
+ storages:
+ app:
+ module: storage
+ # ...
+ routers:
+ app:
+ module: router
+ # ...
+
+Learn more about using Tarantool as an application server in :ref:`Developing applications with Tarantool `.
+
+
+
+.. _configuration_predefined_variables:
+
+Predefined variables
+********************
+
+In a configuration file, you can use the following predefined variables that are replaced with actual values at runtime:
+
+- ``instance_name``
+- ``replicaset_name``
+- ``group_name``
+
+To reference these variables in a configuration file, enclose them in double curly braces with surrounding spaces.
+In the example below, ``{{ instance_name }}`` is replaced with *instance001*.
+
+.. literalinclude:: /code_snippets/snippets/config/instances.enabled/templating/config.yaml
+ :language: yaml
+ :dedent:
+
+As a result, the :ref:`paths to snapshots and write-ahead logs ` differ for different instances.
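+
+For example, for *instance001* the templated options above resolve to the following values (shown here for illustration):
+
+.. code-block:: yaml
+
+    snapshot:
+      dir: ./var/instance001/snapshots
+    wal:
+      dir: ./var/instance001/wals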
+
+
+
+.. _configuration_environment_variable:
+
+Environment variables
+~~~~~~~~~~~~~~~~~~~~~
+
+For each configuration parameter, Tarantool provides two sets of predefined environment variables:
+
+* ``TT_<CONFIG_PARAMETER>``. These variables are used to substitute parameters specified in a configuration file.
+  This means that these variables have a higher :ref:`priority <configuration_precedence>` than the options specified in a configuration file.
+
+* ``TT_<CONFIG_PARAMETER>_DEFAULT``. These variables are used to specify default values for parameters missing in a configuration file.
+  These variables have a lower :ref:`priority <configuration_precedence>` than the options specified in a configuration file.
+
+For example, ``TT_IPROTO_LISTEN`` and ``TT_IPROTO_LISTEN_DEFAULT`` correspond to the ``iproto.listen`` option.
+``TT_SNAPSHOT_DIR`` and ``TT_SNAPSHOT_DIR_DEFAULT`` correspond to the ``snapshot.dir`` option.
+To see all the supported environment variables, execute the ``tarantool`` command with the ``--help-env-list`` :ref:`option `.
+
+.. code-block:: console
+
+ $ tarantool --help-env-list
+
+Below are a few examples that show how to set environment variables of different types, like *string*, *number*, *array*, or *map*:
+
+* String. In the example below, ``TT_IPROTO_LISTEN`` is used to specify the :ref:`listening host and port `:
+
+ .. code-block:: console
+
+ $ export TT_IPROTO_LISTEN='127.0.0.1:3311'
+
+  To specify several listening addresses, separate them with a comma and no space:
+
+ .. code-block:: console
+
+ $ export TT_IPROTO_LISTEN='127.0.0.1:3311,127.0.0.1:3312'
+
+* Number. In this example, ``TT_LOG_LEVEL`` is used to set a logging level to 3 (``CRITICAL``):
+
+ .. code-block:: console
+
+ $ export TT_LOG_LEVEL=3
+
+* Array. The examples below show how to set the ``TT_SHARDING_ROLES`` variable that accepts an array value.
+ Arrays can be passed in two ways: using a *simple* ...
+
+ .. code-block:: console
+
+ $ export TT_SHARDING_ROLES=router,storage
+
+ ... or *JSON* format:
+
+ .. code-block:: console
+
+ $ export TT_SHARDING_ROLES='["router", "storage"]'
+
+ The *simple* format is applicable only to arrays containing scalar values.
+
+* Map. To assign map values to environment variables, you can also use *simple* or *JSON* formats.
+ In the example below, ``TT_LOG_MODULES`` sets different logging levels for different modules using a *simple* format:
+
+ .. code-block:: console
+
+ $ export TT_LOG_MODULES=module1=info,module2=error
+
+  In the next example, ``TT_APP_CFG`` is used to specify the value of a custom configuration property for a :ref:`loaded application <configuration_application>` using the *JSON* format:
+
+ .. code-block:: console
+
+ $ export TT_APP_CFG='{"greeting":"Hi"}'
+
+ The *simple* format is applicable only to maps containing scalar values.
+
+
+.. NOTE::
+
+ There are also special ``TT_INSTANCE_NAME`` and ``TT_CONFIG`` environment variables that can be used to :ref:`start ` the specified Tarantool instance with configuration from the given file.
+
+
+
+
+
+
+.. _configuration_etcd_overview:
+
+Configuration in etcd
+~~~~~~~~~~~~~~~~~~~~~
+
+.. include:: /concepts/configuration/configuration_etcd.rst
+ :start-after: ee_note_etcd_start
+ :end-before: ee_note_etcd_end
+
+Tarantool enables you to store configuration data in one reliable place using `etcd `_.
+To achieve this, you need to:
+
+1. Provide a local YAML configuration with an etcd endpoint address and key prefix in the ``config`` section:
+
+ .. literalinclude:: /code_snippets/snippets/config/instances.enabled/etcd/config.yaml
+ :language: yaml
+ :dedent:
+
+2. Publish a cluster's configuration to an etcd server.
+
+Learn more from the following guide: :ref:`Storing configuration in etcd <configuration_etcd>`.
+
+
+.. _configuration_precedence:
+
+Configuration precedence
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Tarantool configuration options are applied from multiple sources with the following precedence, from highest to lowest:
+
+- ``TT_*`` :ref:`environment variables <configuration_environment_variable>`.
+- Configuration from a :ref:`local YAML file <configuration_file>`.
+- :ref:`Centralized configuration <configuration_etcd_overview>` stored in etcd.
+- ``TT_*_DEFAULT`` :ref:`environment variables <configuration_environment_variable>`.
+
+If the same option is defined in two or more locations, the option with the highest precedence is applied.
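+
+For example, if a configuration file sets ``iproto.listen`` to ``"3301"`` while the ``TT_IPROTO_LISTEN`` variable is also set, the variable value wins (a sketch; the instance name and address are illustrative):
+
+.. code-block:: console
+
+    $ # config.yaml sets iproto.listen: "3301"
+    $ export TT_IPROTO_LISTEN='127.0.0.1:3311'
+    $ tarantool --name instance001 --config config.yaml
+    $ # the instance listens on 127.0.0.1:3311: the variable overrides the file value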
+
+
+
+.. _configuration_options_overview:
+
+Configuration options overview
+------------------------------
+
+This section gives an overview of some useful configuration options.
+All the available options are documented in the :ref:`Configuration reference `.
+
+.. _configuration_options_connection:
+
+Connection settings
+~~~~~~~~~~~~~~~~~~~
+
+To configure an address used to listen for incoming requests, use the ``iproto.listen`` option.
+Below are a few examples of how to do this:
+
+* Set a listening port to ``3301``:
+
+ .. code-block:: yaml
+
+ iproto:
+ listen: "3301"
+
+* Set a listening address to ``127.0.0.1:3301``:
+
+ .. code-block:: yaml
+
+ iproto:
+ listen: "127.0.0.1:3301"
+
+
+* Configure several listening addresses:
+
+ .. code-block:: yaml
+
+ iproto:
+ listen: "127.0.0.1:3301,127.0.0.1:3303"
+
+* Enable :ref:`traffic encryption ` for a connection using the corresponding URI parameters:
+
+ .. code-block:: yaml
+
+ iproto:
+ listen: "127.0.0.1:3301?transport=ssl&ssl_key_file=localhost.key&ssl_cert_file=localhost.crt&ssl_ca_file=ca.crt"
+
+ Note that traffic encryption is supported by the `Enterprise Edition `_ only.
+
+
+* Use a Unix domain socket:
+
+ .. code-block:: yaml
+
+ iproto:
+ listen: "unix/:./var/run/{{ instance_name }}/tarantool.iproto"
+
+
+.. _configuration_options_access_control:
+
+Access control
+~~~~~~~~~~~~~~
+
+The ``credentials`` section allows you to create users and grant them the specified privileges.
+In the example below, there are two users:
+
+* The *replicator* user is used for replication and has a corresponding role.
+* The *storage* user has the ``super`` role and can perform any action on Tarantool instances.
+
+.. literalinclude:: /code_snippets/snippets/sharding/instances.enabled/sharded_cluster/config.yaml
+ :language: yaml
+ :start-at: credentials:
+ :end-at: roles: [super]
+ :dedent:
+
+To learn more, see the :ref:`Access control ` section.
+
+
+.. _configuration_options_memory:
+
+Memory
+~~~~~~
+
+The ``memtx.memory`` option specifies how much :ref:`memory ` Tarantool allocates to actually store data.
+
+.. code-block:: yaml
+
+ memtx:
+ memory: 100000000
+
+When the limit is reached, ``INSERT`` or ``UPDATE`` requests fail with :ref:`ER_MEMORY_ISSUE `.
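+
+On a running instance, you can check how much of this quota is actually used via the slab allocator statistics (a sketch; only the two quota fields are shown):
+
+.. code-block:: lua
+
+    -- Inspect memtx memory usage.
+    local slab = box.slab.info()
+    -- slab.quota_size is the configured limit,
+    -- slab.quota_used is the amount currently allocated.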
+
+
+.. _configuration_options_directories:
+
+Snapshots and write-ahead logs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``snapshot.dir`` and ``wal.dir`` options can be used to configure directories for storing snapshots and write-ahead logs.
+For example, you can place snapshots and write-ahead logs on different hard drives for better reliability.
+
+.. code-block:: yaml
+
+ instance001:
+ snapshot:
+ dir: '/media/drive1/snapshots'
+ wal:
+ dir: '/media/drive2/wals'
+
+To learn more about the persistence mechanism in Tarantool, see the :ref:`Persistence ` section.
+
+
+
+
+.. toctree::
+ :hidden:
+
+ configuration/configuration_etcd
+ configuration/configuration_code
+ configuration/configuration_migrating
diff --git a/doc/concepts/configuration/configuration_code.rst b/doc/concepts/configuration/configuration_code.rst
new file mode 100644
index 0000000000..09263fea9f
--- /dev/null
+++ b/doc/concepts/configuration/configuration_code.rst
@@ -0,0 +1,340 @@
+.. _configuration_code:
+
+Configuration in code
+=====================
+
+.. box_cfg_legacy_note_start
+
+.. NOTE::
+
+    Starting with version 3.0, the recommended way to configure Tarantool is using a :ref:`configuration file <configuration>`.
+    Configuring Tarantool in code is considered a legacy approach.
+
+.. box_cfg_legacy_note_end
+
+This topic covers the specifics of configuring Tarantool in code using the ``box.cfg`` API.
+In this case, the configuration is stored in an :ref:`initialization file <index-init_label>`: a Lua script that sets the specified configuration options.
+You can find all the available options in the :ref:`Configuration reference `.
+
+
+.. _index-init_label:
+
+Initialization file
+-------------------
+
+If the command to :ref:`start Tarantool ` includes an instance file, then
+Tarantool begins by invoking the Lua program in the file, which may have the name ``init.lua``.
+The Lua program may get further arguments
+from the command line or may use operating-system functions, such as ``getenv()``.
+The Lua program almost always begins by invoking ``box.cfg()``, if the database
+server will be used or if ports need to be opened. For example, suppose
+``init.lua`` contains the lines
+
+.. _index-init-example:
+
+.. code-block:: lua
+
+ #!/usr/bin/env tarantool
+ box.cfg{
+ listen = os.getenv("LISTEN_URI"),
+ memtx_memory = 33554432,
+ pid_file = "tarantool.pid",
+ wal_max_size = 2500
+ }
+ print('Starting ', arg[1])
+
+and suppose the environment variable ``LISTEN_URI`` contains 3301,
+and suppose the command line is ``tarantool init.lua ARG``.
+Then the screen might look like this:
+
+.. code-block:: console
+
+ $ export LISTEN_URI=3301
+ $ tarantool init.lua ARG
+ ... main/101/init.lua C> Tarantool 2.8.3-0-g01023dbc2
+ ... main/101/init.lua C> log level 5
+ ... main/101/init.lua I> mapping 33554432 bytes for memtx tuple arena...
+ ... main/101/init.lua I> recovery start
+ ... main/101/init.lua I> recovering from './00000000000000000000.snap'
+ ... main/101/init.lua I> set 'listen' configuration option to "3301"
+ ... main/102/leave_local_hot_standby I> ready to accept requests
+ Starting ARG
+ ... main C> entering the event loop
+
+If you wish to start an interactive session on the same terminal after
+initialization is complete, you can pass the ``-i`` :ref:`command-line option `.
+
+
+.. _box-cfg-params-env:
+
+Environment variables
+---------------------
+
+Starting from version :doc:`2.8.1 `, you can specify configuration parameters via special environment variables.
+The name of a variable should have the following pattern: ``TT_<NAME>``,
+where ``<NAME>`` is the uppercase name of the corresponding :ref:`box.cfg parameter `.
+
+For example:
+
+* ``TT_LISTEN`` -- corresponds to the :ref:`box.cfg.listen ` option.
+* ``TT_MEMTX_DIR`` -- corresponds to the :ref:`box.cfg.memtx_dir ` option.
+
+In the case of an array value, separate the array elements with a comma and no space:
+
+.. code-block:: console
+
+ export TT_REPLICATION="localhost:3301,localhost:3302"
+
+If you need to pass :ref:`additional URI parameters `, use the ``?`` and ``&`` delimiters:
+
+.. code-block:: console
+
+    export TT_LISTEN="localhost:3301?param1=value1&param2=value2"
+
+An empty variable (``TT_LISTEN=``) has the same effect as an unset one, meaning that the corresponding configuration parameter won't be set when calling ``box.cfg{}``.
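+
+As a quick check, you can start a scratch instance and print the resulting option value (a sketch; the memory value is illustrative):
+
+.. code-block:: console
+
+    $ export TT_MEMTX_MEMORY=268435456
+    $ tarantool -e 'box.cfg{} print(box.cfg.memtx_memory) os.exit()'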
+
+
+
+.. _index-local_hot_standby:
+.. _index-replication_port:
+.. _index-slab_alloc_arena:
+.. _index-replication_source:
+.. _index-snap_dir:
+.. _index-wal_dir:
+.. _index-wal_mode:
+.. _index-checkpoint daemon:
+
+.. _box_cfg_params:
+
+
+Configuration parameters
+------------------------
+
+Configuration parameters have the form:
+
+:extsamp:`{**{box.cfg}**}{[{*{key = value}*} [, {*{key = value ...}*}]]}`
+
+Configuration parameters can be set in a Lua :ref:`initialization file `,
+which is specified on the Tarantool command line.
+
+Most configuration parameters are for allocating resources, opening ports, and
+specifying database behavior. All parameters are optional.
+Most of the parameters are dynamic, that is, they can be changed at runtime by calling ``box.cfg{}`` a second time.
+For example, the command below sets the :ref:`listen port ` to ``3301``.
+
+.. code-block:: tarantoolsession
+
+ tarantool> box.cfg{ listen = 3301 }
+ 2023-05-10 13:28:54.667 [31326] main/103/interactive I> tx_binary: stopped
+ 2023-05-10 13:28:54.667 [31326] main/103/interactive I> tx_binary: bound to [::]:3301
+ 2023-05-10 13:28:54.667 [31326] main/103/interactive/box.load_cfg I> set 'listen' configuration option to 3301
+ ---
+ ...
+
+
+To see all the non-null parameters, execute ``box.cfg`` (no parentheses).
+
+.. code-block:: tarantoolsession
+
+ tarantool> box.cfg
+ ---
+ - replication_skip_conflict: false
+ wal_queue_max_size: 16777216
+ feedback_host: https://feedback.tarantool.io
+ memtx_dir: .
+ memtx_min_tuple_size: 16
+ -- other parameters --
+ ...
+
+To see a particular parameter value, read the corresponding ``box.cfg`` field.
+For example, ``box.cfg.listen`` shows the specified :ref:`listen address `.
+
+.. code-block:: tarantoolsession
+
+ tarantool> box.cfg.listen
+ ---
+ - 3301
+ ...
+
+
+
+.. _index-uri:
+
+Listen URI
+----------
+
+Some configuration parameters and some functions depend on a URI (Uniform Resource Identifier).
+The URI string format is similar to the
+`generic syntax for a URI schema `_.
+It may contain (in order):
+
+* user name for login
+* password
+* host name or host IP address
+* port number
+* query parameters
+
+Only a port number is always mandatory. A password is mandatory if a user
+name is specified unless the user name is 'guest'.
+
+Formally, the URI
+syntax is ``[host:]port`` or ``[username:password@]host:port``.
+If a host is omitted, then "0.0.0.0" or "[::]" is assumed,
+meaning respectively any IPv4 address or any IPv6 address
+on the local machine.
+If ``username:password`` is omitted, then the "guest" user is assumed. Some examples:
+
+.. container:: table
+
+ .. rst-class:: left-align-column-1
+ .. rst-class:: left-align-column-2
+
+ +-----------------------------+------------------------------+
+ | URI fragment | Example |
+ +=============================+==============================+
+ | port | 3301 |
+ +-----------------------------+------------------------------+
+ | host:port | 127.0.0.1:3301 |
+ +-----------------------------+------------------------------+
+ | username:password@host:port | notguest:sesame@mail.ru:3301 |
+ +-----------------------------+------------------------------+
+
+In code, the URI value can be passed as a number (if only a port is specified) or a string:
+
+.. code-block:: lua
+
+ box.cfg { listen = 3301 }
+
+ box.cfg { listen = "127.0.0.1:3301" }
+
+In certain circumstances, a Unix domain socket may be used
+where a URI is expected, for example, ``unix/:/tmp/unix_domain_socket.sock`` or
+simply ``/tmp/unix_domain_socket.sock``.
+
+The :ref:`uri ` module provides functions that convert URI strings into their
+components or turn components into URI strings.
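+
+For example, a short sketch of splitting a URI into components (field names follow the ``uri`` module's output; treat the exact set of fields as illustrative):
+
+.. code-block:: lua
+
+    local uri = require('uri')
+    local u = uri.parse('notguest:sesame@mail.ru:3301')
+    -- u.login == 'notguest', u.password == 'sesame',
+    -- u.host == 'mail.ru', u.service == '3301'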
+
+.. _index-uri-several:
+
+Specifying several URIs
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Starting from version 2.10.0, a user can open several listening iproto sockets on a Tarantool instance
+and, consequently, can specify several URIs in the configuration parameters
+such as :ref:`box.cfg.listen ` and :ref:`box.cfg.replication `.
+
+URI values can be set in a number of ways:
+
+* As a string with URI values separated by commas.
+
+ .. code-block:: lua
+
+ box.cfg { listen = "127.0.0.1:3301, /unix.sock, 3302" }
+
+* As a table that contains URIs in the string format.
+
+ .. code-block:: lua
+
+ box.cfg { listen = {"127.0.0.1:3301", "/unix.sock", "3302"} }
+
+* As an array of tables with the ``uri`` field.
+
+ .. code-block:: lua
+
+ box.cfg { listen = {
+ {uri = "127.0.0.1:3301"},
+ {uri = "/unix.sock"},
+ {uri = 3302}
+ }
+ }
+
+* In a combined way -- an array that contains URIs in both the string and the table formats.
+
+ .. code-block:: lua
+
+ box.cfg { listen = {
+ "127.0.0.1:3301",
+ { uri = "/unix.sock" },
+ { uri = 3302 }
+ }
+ }
+
+.. _index-uri-several-params:
+
+Also, starting from version 2.10.0, it is possible to specify additional parameters for URIs.
+You can do this in different ways:
+
+* Using the ``?`` delimiter when URIs are specified in a string format.
+
+ .. code-block:: lua
+
+ box.cfg { listen = "127.0.0.1:3301?p1=value1&p2=value2, /unix.sock?p3=value3" }
+
+* Using the ``params`` table: a URI is passed in a table together with additional parameters in the ``params`` table.
+  Parameters in the ``params`` table overwrite the ones from the URI string ("value2" overwrites "value1" for ``p1`` in the example below).
+
+ .. code-block:: lua
+
+ box.cfg { listen = {
+ "127.0.0.1:3301?p1=value1",
+ params = {p1 = "value2", p2 = "value3"}
+ }
+ }
+
+* Using the ``default_params`` table for specifying default parameter values.
+
+ In the example below, two URIs are passed in a table.
+ The default value for the ``p3`` parameter is defined in the ``default_params`` table
+ and used if this parameter is not specified in URIs.
+ Parameters in the ``default_params`` table are applicable to all the URIs passed in a table.
+
+ .. code-block:: lua
+
+ box.cfg { listen = {
+ "127.0.0.1:3301?p1=value1",
+ { uri = "/unix.sock", params = { p2 = "value2" } },
+ default_params = { p3 = "value3" }
+ }
+ }
+
+The recommended way to specify a URI with additional parameters is the following:
+
+.. code-block:: lua
+
+ box.cfg { listen = {
+ {uri = "127.0.0.1:3301", params = {p1 = "value1"}},
+ {uri = "/unix.sock", params = {p2 = "value2"}},
+ {uri = 3302, params = {p3 = "value3"}}
+ }
+ }
+
+In case of a single URI, the following syntax also works:
+
+.. code-block:: lua
+
+ box.cfg { listen = {
+ uri = "127.0.0.1:3301",
+ params = { p1 = "value1", p2 = "value2" }
+ }
+ }
+
+
+
+.. _configuration_code_run_instance_tarantool:
+
+Starting a Tarantool instance
+-----------------------------
+
+Below is the syntax for starting a Tarantool instance configured in a Lua initialization script:
+
+.. code-block:: console
+
+ $ tarantool LUA_INITIALIZATION_FILE [OPTION ...]
+
+The ``tarantool`` command also provides a set of :ref:`options ` that might be helpful for development purposes.
+
+The command below starts a Tarantool instance configured in the ``init.lua`` file:
+
+.. code-block:: console
+
+ $ tarantool init.lua
diff --git a/doc/concepts/configuration/configuration_etcd.rst b/doc/concepts/configuration/configuration_etcd.rst
new file mode 100644
index 0000000000..2afc2217ba
--- /dev/null
+++ b/doc/concepts/configuration/configuration_etcd.rst
@@ -0,0 +1,177 @@
+.. _configuration_etcd:
+
+Storing configuration in etcd
+=============================
+
+.. ee_note_etcd_start
+
+.. admonition:: Enterprise Edition
+ :class: fact
+
+ Storing configuration in etcd is supported by the `Enterprise Edition `_ only.
+
+.. ee_note_etcd_end
+
+Tarantool enables you to store configuration data in one place using `etcd `_.
+To achieve this, you need to define how to access etcd and publish a cluster's :ref:`YAML configuration ` to an etcd server.
+
+
+.. _etcd_local_configuration:
+
+Local etcd configuration
+------------------------
+
+To store a cluster's configuration in etcd, you need to provide etcd connection settings in a local configuration file.
+These settings are used to :ref:`publish ` a cluster's configuration and :ref:`show ` it.
+
+Connection options for etcd should be specified in the ``config.etcd`` section of the configuration file.
+At a minimum, the following options should be specified:
+
+.. literalinclude:: /code_snippets/snippets/config/instances.enabled/etcd/config.yaml
+ :language: yaml
+ :dedent:
+
+- :ref:`config.etcd.endpoints ` specifies the list of etcd endpoints.
+- :ref:`config.etcd.prefix ` sets a key prefix used to search a configuration. Tarantool searches keys by the following path: ``<prefix>/config/*``. Note that ``<prefix>`` should start with a slash (``/``).
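+
+Together, a minimal ``config.etcd`` section might look as follows (a sketch; the endpoint and prefix values are illustrative and match the startup example below):
+
+.. code-block:: yaml
+
+    config:
+      etcd:
+        endpoints:
+        - http://localhost:2379
+        prefix: /example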
+
+You can also provide additional etcd connection options.
+In the example below, the following options are configured in addition to an endpoint and key prefix:
+
+.. literalinclude:: /code_snippets/snippets/config/instances.enabled/etcd_full/config.yaml
+ :language: yaml
+ :dedent:
+
+- :ref:`config.etcd.username ` and :ref:`config.etcd.password ` specify credentials used for authentication.
+- :ref:`config.etcd.ssl.ca_file ` specifies a path to a trusted certificate authorities (CA) file.
+- :ref:`config.etcd.http.request.timeout ` configures a request timeout for an etcd server.
+
+You can find all the available configuration options in the :ref:`etcd ` section.
+
+
+
+.. _etcd_publishing_configuration:
+
+Publishing a cluster's configuration to etcd
+--------------------------------------------
+
+.. _etcd_publishing_configuration_tt:
+
+Publishing configuration using the tt utility
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The tt utility provides the :ref:`tt cluster ` command for managing a cluster's configuration.
+The ``tt cluster publish`` command can be used to publish a cluster's configuration to etcd.
+
+The example below shows how a :ref:`layout ` of the application called ``app`` might look:
+
+.. code-block:: none
+
+ ├── tt.yaml
+ └── instances.enabled
+ └── app
+ ├── config.yaml
+ ├── cluster.yaml
+ └── instances.yml
+
+* ``config.yaml`` contains a :ref:`local configuration ` used to connect to etcd.
+* ``cluster.yaml`` contains a cluster's configuration to be published.
+* ``instances.yml`` specifies :ref:`instances ` to run in the current environment. Note that ``tt cluster publish`` ignores the instances configured in this file.
+
+To publish a cluster's configuration (``cluster.yaml``) to an etcd server, execute ``tt cluster publish`` as follows:
+
+.. code-block:: console
+
+ $ tt cluster publish "http://localhost:2379/example" instances.enabled/app/cluster.yaml
+
+.. NOTE::
+
+ You can see a cluster's configuration using the :ref:`tt cluster show ` command.
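+
+    For example, to look at the configuration published above (the URI here is an example):
+
+    .. code-block:: console
+
+        $ tt cluster show "http://localhost:2379/example"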
+
+
+.. _etcd_publishing_configuration_etcdctl:
+
+Publishing configuration using etcdctl
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To publish a cluster's configuration using the ``etcdctl`` utility, use the ``put`` command:
+
+.. code-block:: console
+
+ $ etcdctl put /example/config/all < cluster.yaml
+
+.. NOTE::
+
+ For etcd versions earlier than 3.4, you need to set the ``ETCDCTL_API`` environment variable to ``3``.
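+
+To verify that the configuration is published, you can read the key back, for example:
+
+.. code-block:: console
+
+    $ etcdctl get /example/config/all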
+
+
+
+
+.. _etcd_starting_instances:
+
+Starting Tarantool instances
+----------------------------
+
+The :ref:`tt ` utility is the recommended way to start Tarantool instances.
+You can learn how to do this from the :ref:`Starting and stopping instances ` section.
+
+You can also use the ``tarantool`` command to :ref:`start a Tarantool instance `.
+In this case, you can skip creating a :ref:`local etcd configuration ` and provide etcd connection settings using the ``TT_CONFIG_ETCD_ENDPOINTS`` and ``TT_CONFIG_ETCD_PREFIX`` :ref:`environment variables `.
+
+.. code-block:: console
+
+ $ export TT_CONFIG_ETCD_ENDPOINTS=http://localhost:2379
+ $ export TT_CONFIG_ETCD_PREFIX=/example
+
+ $ tarantool --name instance001
+ $ tarantool --name instance002
+ $ tarantool --name instance003
+
+
+
+
+.. _etcd_reloading_configuration:
+
+Reloading configuration
+-----------------------
+
+By default, Tarantool watches etcd keys with the :ref:`specified prefix ` for changes in a cluster's configuration and reloads a changed configuration automatically.
+If necessary, you can set the :ref:`config.reload ` option to ``manual`` to turn off configuration reloading:
+
+.. code-block:: yaml
+
+ config:
+ reload: 'manual'
+ etcd:
+ # ...
+
+In this case, you can reload a configuration in an :ref:`admin console ` or :ref:`application code ` using the ``reload()`` function provided by the :ref:`config ` module:
+
+.. code-block:: lua
+
+ require('config'):reload()
+
+
+
+
+
+
+
+
+
+..
+ Generating certificates for testing:
+ 1) openssl genrsa -out ca.key 2048
+ 2) openssl req -new -x509 -days 365 -key ca.key -subj "/C=CN/ST=GD/L=SZ/O=Acme, Inc./CN=Acme Root CA" -out ca.crt
+ 3) openssl req -newkey rsa:2048 -nodes -keyout server.key -subj "/C=CN/ST=GD/L=SZ/O=Acme, Inc./CN=localhost" -out server.csr
+ 4) openssl x509 -req -extfile <(printf "subjectAltName=DNS:localhost,IP:127.0.0.1") -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt
+ 5) sudo cp server.crt /etc/ssl/certs
+ 6) sudo cp server.key /etc/ssl/private
+
+ Starting etcd:
+ etcd --cert-file=ssl/server.crt --key-file=ssl/server.key --advertise-client-urls=https://localhost:2379 --listen-client-urls=https://localhost:2379
+
+ Get keys:
+ etcdctl get /tt/config/all --cert=ssl/server.crt --key=ssl/server.key
+
+ Test using curl:
+ curl --cacert ssl/ca.crt https://localhost:2379/v2/keys/foo -XPUT -d value=bar -v
\ No newline at end of file
diff --git a/doc/concepts/configuration/configuration_migrating.rst b/doc/concepts/configuration/configuration_migrating.rst
new file mode 100644
index 0000000000..c557d741ae
--- /dev/null
+++ b/doc/concepts/configuration/configuration_migrating.rst
@@ -0,0 +1,9 @@
+.. _configuration_migrating_declarative:
+
+Migrating to declarative configuration
+======================================
+
+.. TODO
+ https://github.com/tarantool/doc/issues/3661
+ 1) Configuration applying idempotence: how the config's 'target state' approach differs from the 'state changes' box.cfg() approach.
+ 2) How non-dynamic box.cfg() options are applied (no error, wait for restart).
diff --git a/doc/concepts/data_model/schema_desc.rst b/doc/concepts/data_model/schema_desc.rst
index cecf2566f6..21e9aefa03 100644
--- a/doc/concepts/data_model/schema_desc.rst
+++ b/doc/concepts/data_model/schema_desc.rst
@@ -86,14 +86,7 @@ The schema would look something like this:
This alternative is simpler to use, and you do not have to dive deep into Lua.
-``DDL`` is a built-in
-:doc:`Cartridge ` module.
-Cartridge is a cluster solution for Tarantool. In its WebUI, there is a separate tab
-called "Code". On this tab, in the ``schema.yml`` file, you can define the schema, check its correctness,
-and apply it to the whole cluster.
-
-If you do not use Cartridge, you can still use the DDL module:
-put the following Lua code into the file that you use to run Tarantool.
+To use the DDL module, put the following Lua code into the file that you use to run Tarantool.
This file is usually called ``init.lua``.
.. code:: lua
diff --git a/doc/concepts/index.rst b/doc/concepts/index.rst
index 60b72ec810..46bcfe065a 100644
--- a/doc/concepts/index.rst
+++ b/doc/concepts/index.rst
@@ -53,9 +53,6 @@ Application server
Using Tarantool as an application server, you can write
applications in Lua, C, or C++. You can also create reusable :ref:`modules `.
-A convenient way to serve a clustered application on Tarantool is using :ref:`Tarantool Cartridge ` --
-a framework for developing, deploying, and managing applications.
-
To increase the speed of code execution, Tarantool has a Lua Just-In-Time compiler (LuaJIT) on board.
LuaJIT compiles hot paths in the code -- paths that are used many times --
thus making the application work faster.
@@ -114,11 +111,11 @@ For details, check the :ref:`Storage engines ` section.
.. toctree::
:hidden:
+ configuration
data_model/index
coop_multitasking
atomic
modules
- Tarantool Cartridge
sharding/index
replication/index
triggers
diff --git a/doc/concepts/modules.rst b/doc/concepts/modules.rst
index ccd7cdcc2d..6fbc3b847e 100644
--- a/doc/concepts/modules.rst
+++ b/doc/concepts/modules.rst
@@ -3,9 +3,8 @@
Modules
=======
-Any logic that is used in Tarantool can be packaged as an application
-(like a :ref:`Cartridge application `) or a reusable **module**.
-A module is an optional library that enhances Tarantool functionality.
+Any logic that is used in Tarantool can be packaged as an application or a reusable **module**.
+A module is an optional library that extends Tarantool functionality.
It can be used by Tarantool applications or other modules.
Modules allow for easier code management and hot code reload without restarting the Tarantool instance.
Like applications, modules in Tarantool can be written in Lua,
diff --git a/doc/concepts/replication/index.rst b/doc/concepts/replication/index.rst
index 17194039f2..5d0e33f501 100644
--- a/doc/concepts/replication/index.rst
+++ b/doc/concepts/replication/index.rst
@@ -10,7 +10,7 @@ Replication allows multiple Tarantool instances to work on copies of the same
databases. The databases are kept in sync because each instance can communicate
its changes to all the other instances.
-This chapter includes the following sections:
+This section includes the following topics:
.. toctree::
:maxdepth: 2
@@ -20,9 +20,5 @@ This chapter includes the following sections:
repl_sync
repl_leader_elect
-For practical guides to replication, see the :ref:`How-to section `.
-You can learn about :ref:`bootstrapping a replica set `,
-:ref:`adding instances ` to the replica set
-or :ref:`removing them `,
-:ref:`using synchronous replication `
-and :ref:`managing leader elections `.
+For practical guides to replication, see :ref:`Replication tutorials `.
+You can learn about bootstrapping a replica set, adding instances to it, and removing them.
diff --git a/doc/concepts/replication/repl_architecture.rst b/doc/concepts/replication/repl_architecture.rst
index ed8850816e..a5c1453167 100644
--- a/doc/concepts/replication/repl_architecture.rst
+++ b/doc/concepts/replication/repl_architecture.rst
@@ -47,7 +47,7 @@ The following are specifics of adding different types of information to the WAL:
* Data change operations on **replication-local** spaces (:doc:`created ` with ``is_local = true``) are written to the WAL but are not replicated.
-To learn how to enable replication, check the :ref:`Bootstrapping a replica set ` guide.
+To learn how to enable replication, check the :ref:`Bootstrapping a replica set ` guide.
.. _replication_stages:
diff --git a/doc/concepts/replication/repl_leader_elect.rst b/doc/concepts/replication/repl_leader_elect.rst
index 1031cd55b3..b6bc31d08c 100644
--- a/doc/concepts/replication/repl_leader_elect.rst
+++ b/doc/concepts/replication/repl_leader_elect.rst
@@ -11,7 +11,7 @@ on the base of Tarantool and decreases
dependency on external tools for replica set management.
To learn how to configure and monitor automated leader elections,
-check the :ref:`how-to guide `.
+check :ref:`Managing leader elections `.
The following topics are described below:
@@ -44,9 +44,9 @@ Leader election is described below.
The system behavior can be specified exactly according to the Raft algorithm. To do this:
* Ensure that the user has only synchronous spaces.
- * Set the :ref:`replication_synchro_quorum ` option to ``N / 2 + 1``.
- * Set the :ref:`replication_synchro_timeout ` option to infinity.
- * In the :ref:`election_fencing_mode ` option, select either the ``soft`` mode (the default)
+ * Set the :ref:`replication.synchro_quorum ` option to ``N / 2 + 1``.
+ * Set the :ref:`replication.synchro_timeout ` option to infinity.
+ * In the :ref:`replication.election_fencing_mode ` option, select either the ``soft`` mode (the default)
or the ``strict`` mode, which is more restrictive.
.. _repl_leader_elect_process:
@@ -71,11 +71,11 @@ for itself and sends vote requests to other nodes.
Upon receiving vote requests, a node votes for the first of them, and then cannot
do anything in the same term but wait for a leader to be elected.
-The node that collected a quorum of votes defined by the :ref:`replication_synchro_quorum ` parameter
+The node that collected a quorum of votes defined by the :ref:`replication.synchro_quorum ` parameter
becomes the leader
and notifies other nodes about that. Also, a split vote can happen
when no nodes received a quorum of votes. In this case,
-after a :ref:`random timeout `,
+after a random timeout,
each node increases its term and starts a new election round if no new vote
request with a greater term arrives during this time.
Eventually, a leader is elected.
@@ -87,7 +87,7 @@ All the non-leader nodes are called *followers*. The nodes that start a new
election round are called *candidates*. The elected leader sends heartbeats to
the non-leader nodes to let them know it is alive.
-In case there are no heartbeats for the period of :ref:`replication_timeout ` * 4,
+In case there are no heartbeats for the period of :ref:`replication.timeout ` * 4,
a non-leader node starts a new election if the following conditions are met:
* The node has a quorum of connections to other cluster members.
@@ -96,7 +96,7 @@ a non-leader node starts a new election if the following conditions are met:
.. note::
A cluster member considers the leader node to be alive if the member received heartbeats from the leader at least
- once during the ``replication_timeout * 4``,
+ once during the ``replication.timeout * 4``,
and there are no replication errors (the connection is not broken due to timeout or due to an error).
Terms and votes are persisted by each instance to preserve certain Raft guarantees.
@@ -105,7 +105,7 @@ During the election, the nodes prefer to vote for those ones that have the
newest data. So as if an old leader managed to send something before its death
to a quorum of replicas, that data wouldn't be lost.
-When :ref:`election is enabled `, there must be connections
+When election is enabled, there must be connections
between each node pair so as it would be the full mesh topology. This is needed
because election messages for voting and other internal things need a direct
connection between the nodes.
@@ -117,26 +117,26 @@ Once the leader is elected, it considers itself in the leader position until rec
This can lead to a split situation if the other nodes elect a new leader upon losing the connectivity to the previous one.
The issue is resolved in Tarantool version :doc:`2.10.0 ` by introducing the leader *fencing* mode.
-The mode can be switched by the :ref:`election_fencing_mode ` configuration parameter.
+The mode can be switched by the :ref:`replication.election_fencing_mode ` configuration parameter.
When the fencing is set to ``soft`` or ``strict``, the leader resigns its leadership if it has less than
-:ref:`replication_synchro_quorum ` of alive connections to the cluster nodes.
+:ref:`replication.synchro_quorum ` of alive connections to the cluster nodes.
The resigning leader receives the status of a follower in the current election term and becomes read-only.
-Leader *fencing* can be turned off by setting the :ref:`election_fencing_mode ` configuration parameter to ``off``.
+Leader *fencing* can be turned off by setting the :ref:`replication.election_fencing_mode ` configuration parameter to ``off``.
In ``soft`` mode, a connection is considered dead if there are no responses for
-:ref:`4*replication_timeout ` seconds both on the current leader and the followers.
+:ref:`4 * replication.timeout ` seconds both on the current leader and the followers.
In ``strict`` mode, a connection is considered dead if there are no responses
-for :ref:`2*replication_timeout ` seconds on the current leader and for
-:ref:`4*replication_timeout ` seconds on the followers.
+for :ref:`2 * replication.timeout ` seconds on the current leader and for
+:ref:`4 * replication.timeout ` seconds on the followers.
This improves chances that there is only one leader at any time.
-Fencing applies to the instances that have the :ref:`election_mode ` set to "candidate" or "manual".
+Fencing applies to the instances that have the :ref:`replication.election_mode ` set to "candidate" or "manual".
.. _repl_leader_elect_splitbrain:
There can still be a situation when a replica set has two leaders working independently (so-called *split-brain*).
-It can happen, for example, if a user mistakenly lowered the :ref:`replication_synchro_quorum ` below ``N / 2 + 1``.
+It can happen, for example, if a user mistakenly lowered the :ref:`replication.synchro_quorum ` below ``N / 2 + 1``.
In this situation, to preserve the data integrity, if an instance detects the split-brain anomaly in the incoming replication data,
it breaks the connection with the instance sending the data and writes the ``ER_SPLIT_BRAIN`` error in the log.
@@ -155,3 +155,99 @@ to the other nodes.
Term numbers also work as a kind of filter.
For example, if election is enabled on two nodes and ``node1`` has the term number less than ``node2``,
then ``node2`` doesn't accept any transactions from ``node1``.
+
+
+.. _how-to-repl_leader_elect:
+
+Managing leader elections
+-------------------------
+
+.. _repl_leader_elect_config:
+
+Configuration
+~~~~~~~~~~~~~
+
+Leader election is configured by the following options in the ``replication`` section of the configuration:
+
+.. code-block:: yaml
+
+ replication:
+ election_mode:
+ election_fencing_mode:
+ election_timeout:
+ timeout:
+ synchro_quorum:
+
+
+* :ref:`replication.election_mode ` -- specifies the role of a node in the leader election
+ process.
+* :ref:`replication.election_fencing_mode ` -- specifies the :ref:`leader fencing mode `.
+* :ref:`replication.election_timeout ` -- specifies the timeout between election rounds if the
+ previous round ended up with a split vote.
+* :ref:`replication.timeout ` -- a time interval (in seconds) used by a master to send heartbeat requests to a replica when there are no updates to send to this replica.
+* :ref:`replication.synchro_quorum ` -- a number of replicas that should confirm the receipt of a :ref:`synchronous ` transaction before it can finish its commit.
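+
+For illustration, a filled-in configuration might look as follows (all values here are examples; ``election_timeout`` and ``timeout`` are in seconds):
+
+.. code-block:: yaml
+
+    replication:
+      election_mode: candidate
+      election_fencing_mode: soft
+      election_timeout: 5
+      timeout: 1
+      synchro_quorum: 3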
+
+It is important to know that being a leader is not the only requirement for a node to be writable.
+The leader should also satisfy the following requirements:
+
+* The :ref:`database.mode ` option is set to ``rw``.
+
+* The leader shouldn't be in the orphan state.
+
+Nothing prevents you from setting the ``database.mode`` option to ``ro``,
+but the leader won't be writable then. The option doesn't affect the
+election process itself, so a read-only instance can still vote and become
+a leader.
+
+.. _repl_leader_elect_monitoring:
+
+Monitoring
+~~~~~~~~~~
+
+To monitor the current state of a node regarding the leader election, use the :doc:`box.info.election ` function.
+
+**Example:**
+
+.. code-block:: console
+
+ tarantool> box.info.election
+ ---
+ - state: follower
+ vote: 0
+ leader: 0
+ term: 1
+ ...
+
+The Raft-based election implementation logs all its actions
+with the ``RAFT:`` prefix. These actions include handling new Raft messages,
+changing the node state, voting, and bumping terms.
+
+.. _repl_leader_elect_important:
+
+Important notes
+~~~~~~~~~~~~~~~
+
+Leader election doesn't work correctly if the election quorum is set to a value
+less than or equal to ``<cluster size> / 2``. In that case, a split vote can lead to
+a state where two leaders are elected at once.
+
+For example, suppose there are five nodes. When the quorum is set to ``2``, ``node1``
+and ``node2`` can both vote for ``node1``. ``node3`` and ``node4`` can both vote
+for ``node5``. In this case, ``node1`` and ``node5`` both win the election.
+When the quorum is set to the cluster majority, that is,
+``(<cluster size> / 2) + 1`` or greater, a split vote is impossible.
+
+Take this into account when adding new nodes.
+If adding a node changes the majority value, it's better to update the quorum
+on all the existing nodes before adding the new one.
+
+Also, the automated leader election doesn't bring many benefits in terms of data
+safety when used *without* :ref:`synchronous replication `.
+If the replication is asynchronous and a new leader gets elected,
+the old leader is still active and considers itself the leader.
+In this case, nothing stops
+it from accepting requests from clients and making transactions.
+Non-synchronous transactions are successfully committed because
+they are not checked against the quorum of replicas.
+Synchronous transactions fail because they are not able
+to collect the quorum -- most of the replicas reject
+the old leader's transactions since it is not a leader anymore.
+
diff --git a/doc/concepts/replication/repl_sync.rst b/doc/concepts/replication/repl_sync.rst
index 40b21ff4b0..21a70d414e 100644
--- a/doc/concepts/replication/repl_sync.rst
+++ b/doc/concepts/replication/repl_sync.rst
@@ -15,8 +15,7 @@ to a replica, from the client's point of view the transaction will disappear.
are not considered committed and are not responded to a client until they are
replicated onto some number of replicas.
-To learn how to enable and use synchronous replication,
-check the :ref:`guide `.
+To enable synchronous replication, use the :ref:`space_opts.is_sync ` option when creating or altering a space.
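+
+For example, a minimal sketch (the space name here is illustrative):
+
+.. code-block:: lua
+
+    -- Create a space with synchronous replication enabled
+    box.schema.space.create('bands', { is_sync = true })
+
+    -- Enable synchronous replication for an existing space
+    box.space.bands:alter({ is_sync = true })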
Synchronous and asynchronous transactions
-----------------------------------------
diff --git a/doc/contributing/contributing.rst b/doc/contributing/contributing.rst
index 5c9dd555d7..76840ff1df 100644
--- a/doc/contributing/contributing.rst
+++ b/doc/contributing/contributing.rst
@@ -132,8 +132,8 @@ There are several ways to improve the documentation:
see how it works. This can be done automatically in Docker.
To learn more, check the `README of the tarantool/doc repository `_.
-Some projects, like `Tarantool Cartridge `_,
-have their documentation in the code repository.
+Some Tarantool projects have their documentation in the code repository.
+This is typical for modules, for example, `metrics `_.
This is done on purpose, so the developers themselves can update it faster.
You can find instructions for building such documentation in the code repository.
@@ -161,8 +161,6 @@ Here are some of our official modules:
the persistent message queue.
* `metrics `_: Ready-to-use solution for
collecting metrics.
-* `cartridge `_: Framework for writing
- distributed applications.
Official modules are provided in our organization on GitHub.
@@ -311,11 +309,8 @@ help with application deployment, or allow working with Kubernetes.
Here are some of the tools created by the Tarantool team:
-* `ansible-cartridge `_:
- an Ansible role to deploy Cartridge applications.
-* `cartridge-cli `_:
- a CLI utility for creating applications, launching clusters locally on Cartridge,
- and solving operation problems.
+* `tt `_:
+ a CLI utility for creating and managing Tarantool applications.
* `tarantool-operator `_:
a Kubernetes operator for cluster orchestration.
diff --git a/doc/contributing/docs/infra.rst b/doc/contributing/docs/infra.rst
index 7276247a1a..0271c6e2a5 100644
--- a/doc/contributing/docs/infra.rst
+++ b/doc/contributing/docs/infra.rst
@@ -15,9 +15,8 @@ The documentation source files are mainly stored in the
`documentation repository `_.
However, in some cases, they are stored in the
repositories of other Tarantool-related products
-or modules -- `Cartridge `_,
-`Monitoring `__,
-and others.
+or modules, such as
+`Monitoring `__.
If you are working with source files from a product or module repository,
add that repository as a submodule to the
@@ -103,32 +102,6 @@ The ``${project_root}`` variable is defined earlier in the file as ``project_roo
This is because the documentation build has to start from the documentation repository root
directory.
-cartridge_cli
-^^^^^^^^^^^^^
-
-The content source file for the ``cartridge_cli`` submodule is
-``README.rst``, located in the directory of the submodule repository.
-In the final documentation view, the content should appear here:
-``https://www.tarantool.io/en/doc/latest/book/cartridge/cartridge_cli/``.
-
-To make this work:
-
-* Create a directory at ``./doc/book/cartridge/cartridge_cli``.
-* Copy ``./modules/cartridge_cli/README.rst`` to
- ``./doc/book/cartridge/cartridge_cli/index.rst``.
-
-Here ar the corresponding settings in ``build_submodules.sh``:
-
-.. code-block:: bash
-
- rst_dest="${project_root}/doc/book/cartridge"
- cartridge_cli_root="${project_root}/modules/cartridge-cli"
- cartridge_cli_dest="${rst_dest}/cartridge_cli"
- cartridge_cli_index_dest="${cartridge_cli_dest}/index.rst"
-
- mkdir -p "${cartridge_cli_dest}"
- yes | cp -rf "${cartridge_cli_root}/README.rst" "${cartridge_cli_index_dest}"
-
.. _guidelines_doc_submodules_gitignore:
3. Update .gitignore
diff --git a/doc/contributing/docs/localization/_includes/glossary-cartridge.csv b/doc/contributing/docs/localization/_includes/glossary-cartridge.csv
deleted file mode 100644
index 67502c4740..0000000000
--- a/doc/contributing/docs/localization/_includes/glossary-cartridge.csv
+++ /dev/null
@@ -1,10 +0,0 @@
-Term [en];Term [ru];Description [en];Description [ru]
-;приложение на Tarantool Cartridge;;Если без предлога, то теряется смысл: читается так, как будто Tarantool Cartridge — это название приложения. А это не так.
-Tarantool Cartridge application;Tarantool Cartridge — это фреймворк;;" на базе которого можно разработать свое приложение."""
-Cartridge;Cartridge;;
-production environment;производственная среда;Production environment is a term used mostly by developers to describe the setting where software and other products are actually put into operation for their intended uses by end users.;
-failover;восстановление после сбоев;In computing and related technologies such as networking, failover is switching to a redundant or standby computer server, system, hardware component or network upon the failure or abnormal termination of the previously active application, server, system, hardware component, or network.;
-replicaset;набор реплик;;
-directory;директория;;
-bucket;сегмент;;
-check;выберите, выбрать;To select a checkbox;
\ No newline at end of file
diff --git a/doc/contributing/docs/localization/_includes/glossary.csv b/doc/contributing/docs/localization/_includes/glossary.csv
index 5dc02a7245..8cdfb97728 100644
--- a/doc/contributing/docs/localization/_includes/glossary.csv
+++ b/doc/contributing/docs/localization/_includes/glossary.csv
@@ -1,7 +1,6 @@
Term [en];Term [ru];Description [en];Description [ru]
space;спейс;A space is a container for tuples.;
-;"https://www.tarantool.io/en/doc/latest/book/box/data_model/#spaces""";;NOUN
-tuple;кортеж;A tuple plays the same role as a “row” or a “record”. The number of tuples in a space is unlimited. Tuples in Tarantool are stored as MsgPack arrays. https://www.tarantool.io/en/doc/latest/book/box/data_model/#tuples;
+tuple;кортеж;A tuple plays the same role as a “row” or a “record”. The number of tuples in a space is unlimited. Tuples in Tarantool are stored as MsgPack arrays.;
Tarantool;Tarantool;НЕ ПЕРЕВОДИТЬ;
primary index;первичный индекс;The first index defined on a space is called the primary key index, and it must be unique. All other indexes are called secondary indexes, and they may be non-unique. https://www.tarantool.io/en/doc/latest/book/box/data_model/#indexes;
fiber;файбер;A fiber is a set of instructions which are executed with cooperative multitasking. Fibers managed by the fiber module are associated with a user-supplied function called the fiber function. https://www.tarantool.io/en/doc/latest/reference/reference_lua/fiber/#fibers;
@@ -16,8 +15,6 @@ implicit casting;неявное приведение типов;;
database;база данных;;
Release policy;Релизная политика;A set of rules for releasing and naming new distributions of Tarantool: where we add new features and where we don't, how we give them numbers, what versions are suitable to use in production.;
field;поле;Fields are distinct data values, contained in a tuple. They play the same role as «row columns» or «record fields» in relational databases.;
-;;;
-;"https://www.tarantool.io/ru/doc/latest/book/box/data_model/#term-field""";;NOUN
leader election;выборы лидера;(in a replica set, by the Raft algorithm);
replica set;набор реплик;;
heartbeat;контрольный сигнал;;
@@ -43,14 +40,18 @@ expression;выражение;;
predicate;предикат;(SQL) Predicates, which specify conditions that can be evaluated to SQL three-valued logic (3VL) (true/false/unknown) or Boolean truth values and are used to limit the effects of statements and queries, or to change program flow.;
query;запрос;(SQL) Queries retrieve the data based on specific criteria. A query is a statement that returns a result set (possibly empty).;
result set;результат запроса;(SQL) An SQL result set is a set of rows from a database, as well as metadata about the query such as the column names, and the types and sizes of each column. A result set is effectively a table.;
-resultset;результат запроса;(SQL) An SQL result set is a set of rows from a database, as well as metadata about the query such as the column names, and the types and sizes of each column. A result set is effectively a table.;
statement;инструкция;(SQL) A statement is any text that the database engine recognizes as a valid command.;(SQL) Любой текст, который распознаётся движком БД как команда. Инструкция состоит из ключевых слов и выражений языка SQL, которые предписывают Tarantool выполнять какие-либо действия с базой данных.
-;"Tarantool: A statement consists of SQL-language keywords and expressions that direct Tarantool to do something with a database. https://www.tarantool.io/en/doc/latest/reference/reference_sql/sql_user_guide/#statements""";;
+;;"Tarantool: A statement consists of SQL-language keywords and expressions that direct Tarantool to do something with a database. https://www.tarantool.io/en/doc/latest/reference/reference_sql/sql_user_guide/#statements""";;
batch;пакет (инструкций);(SQL) A series of SQL statements sent to the server at once is called a batch.;(SQL) Серия SQL-инструкций (statements), отправляемая на сервер вместе
production configuration;конфигурация производственной среды;;
-deployment;;Transforming a mechanical, electrical, or computer system from a packaged to an operational state. IT infrastructure deployment typically involves defining the sequence of operations or steps, often referred to as a deployment plan, that must be carried to deliver changes into a target system environment.;
+deployment;развертывание;Transforming a mechanical, electrical, or computer system from a packaged to an operational state. IT infrastructure deployment typically involves defining the sequence of operations or steps, often referred to as a deployment plan, that must be carried to deliver changes into a target system environment.;
roll back;отменить;;транзакцию
deploy to production;;IT infrastructure deployment typically involves defining the sequence of operations or steps, often referred to as a deployment plan, that must be carried to deliver changes into a target system environment. Production environment is a setting where software and other products are actually put into operation for their intended uses by end users;
operations;эксплуатация;(DevOps) Information technology operations, or IT operations, are the set of all processes and services that are both provisioned by an IT staff to their internal or external clients and used by themselves, to run themselves as a business. ;
to deploy;;Transforming a mechanical, electrical, or computer system from a packaged to an operational state. IT infrastructure deployment typically involves defining the sequence of operations or steps, often referred to as a deployment plan, that must be carried to deliver changes into a target system environment.;
-deployment plan;;A sequence of operations or steps that must be carried to deliver changes into a target system environment.;
\ No newline at end of file
+deployment plan;;A sequence of operations or steps that must be carried to deliver changes into a target system environment.;
+production environment;производственная среда;Production environment is a term used mostly by developers to describe the setting where software and other products are actually put into operation for their intended uses by end users.;
+failover;восстановление после сбоев;In computing and related technologies such as networking, failover is switching to a redundant or standby computer server, system, hardware component or network upon the failure or abnormal termination of the previously active application, server, system, hardware component, or network.;
+directory;директория;;
+bucket;сегмент;;
+select;выберите, выбрать;To select a checkbox;
\ No newline at end of file
diff --git a/doc/contributing/docs/localization/glossaries.rst b/doc/contributing/docs/localization/glossaries.rst
index 97fb063058..b9917c9747 100644
--- a/doc/contributing/docs/localization/glossaries.rst
+++ b/doc/contributing/docs/localization/glossaries.rst
@@ -1,18 +1,8 @@
Glossaries
==========
-Tarantool Core
---------------
-
.. csv-table::
:file: _includes/glossary.csv
:header-rows: 1
:delim: ;
-Cartridge
----------
-
-.. csv-table::
- :file: _includes/glossary-cartridge.csv
- :header-rows: 1
- :delim: ;
diff --git a/doc/contributing/docs/localization/locstate.rst b/doc/contributing/docs/localization/locstate.rst
index 80c38d5255..8c38ac3754 100644
--- a/doc/contributing/docs/localization/locstate.rst
+++ b/doc/contributing/docs/localization/locstate.rst
@@ -15,18 +15,10 @@ State of localization
- |doc|
- 352 000
- * - Cartridge
- - |cartridge|
- - 14 000
-
* - Tarantool Ansible Role
- |tarantool-ansible-role|
- 11 000
- * - Cartridge CLI
- - |cartridge-cli|
- - 6 500
-
* - Tarantool Enterprise Edition
- |tarantool-enterprise|
- 6 000
@@ -58,12 +50,6 @@ State of localization
.. |doc| image:: https://badges.crowdin.net/tarantool-docs/localized.svg
:target: https://crowdin.com/project/tarantool-docs/ru#
-.. |cartridge| image:: https://badges.crowdin.net/tarantool-cartridge-docs/localized.svg
- :target: https://crowdin.com/project/tarantool-cartridge-docs/ru#
-
-.. |cartridge-cli| image:: https://badges.crowdin.net/tarantool-cartridge-cli/localized.svg
- :target: https://crowdin.com/project/tarantool-cartridge-cli/ru#
-
.. |tarantool-enterprise| image:: https://badges.crowdin.net/tarantool-enterprise-docs/localized.svg
:target: https://crowdin.com/project/tarantool-enterprise-docs/ru#
diff --git a/doc/contributing/docs/sphinx-warnings.rst b/doc/contributing/docs/sphinx-warnings.rst
index e7743614d1..51210c2c75 100644
--- a/doc/contributing/docs/sphinx-warnings.rst
+++ b/doc/contributing/docs/sphinx-warnings.rst
@@ -163,7 +163,7 @@ This may happen when you refer to a wrong path to a document.
Check the path.
-If the path points to ``cartridge`` or another submodule, check that you've
+If the path points to a submodule, check that you've
:doc:`built the submodules content `
before building docs.
diff --git a/doc/enterprise/admin.rst b/doc/enterprise/admin.rst
index 8f9d19b1bf..a5a2578d2f 100644
--- a/doc/enterprise/admin.rst
+++ b/doc/enterprise/admin.rst
@@ -1,119 +1,6 @@
.. _enterprise-admin:
-===============================================================================
Cluster administrator's guide
-===============================================================================
+=============================
-This guide focuses on Enterprise-specific administration features available
-on top of Tarantool Community Edition with Tarantool Cartridge framework:
-
-* :ref:`space explorer `
-* :ref:`upgrade of environment-independent applications in production `
-
-Otherwise, consult the following documentation for:
-
-* basic information on
- :doc:`deploying and managing a Tarantool cluster `
-* more information on
- :doc:`managing Tarantool instances `
-
-.. _space_explorer:
-
--------------------------------------------------------------------------------
-Exploring spaces
--------------------------------------------------------------------------------
-
-The web interface lets you connect (in the browser) to any instance in the cluster
-and see what spaces it stores (if any) and their contents.
-
-To explore spaces:
-
-#. Open the **Space Explorer** tab in the menu on the left:
-
- .. image:: images/space_explr_tab.png
- :align: center
- :scale: 80%
-
-#. Click **connect** next to an instance that stores data. The basic sanity-check
- (``test.py``) of the example application puts sample data to one replica
- set (shard), so its master and replica store the data in their spaces:
-
- .. image:: images/spaces_with_data.png
- :align: center
- :scale: 80%
-
- When connected to a instance, the space explorer shows a table with basic
- information on its spaces. For more information, see the
- :doc:`box.space reference `.
-
- To see hidden spaces, tick the corresponding checkbox:
-
- .. image:: images/hidden_spaces.png
- :align: center
- :scale: 80%
-
-#. Click the space's name to see its format and contents:
-
- .. image:: images/space_contents.png
- :align: center
- :scale: 70%
-
- To search the data, select an index and, optionally, its iteration type from
- the drop-down lists, and enter the index value:
-
- .. image:: images/space_search.png
- :align: center
- :scale: 80%
-
-.. _enterprise-production-upgrade:
-
--------------------------------------------------------------------------------
-Upgrading in production
--------------------------------------------------------------------------------
-
-To upgrade either a single instance or a cluster, you need a new version of the
-packaged (archived) application.
-
-A single instance upgrade is simple:
-
-#. Upload the package (archive) to the server.
-#. Stop the current instance.
-#. Deploy the new one as described in :ref:`deploying packaged applications `
- (or :ref:`archived ones `).
-
-.. _enterprise-cluster-upgrade:
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Cluster upgrade
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To upgrade a cluster, choose one of the following scenarios:
-
-* **Cluster shutdown**. Recommended for backward-incompatible updates, requires
- downtime.
-
-* **Instance by instance**. Recommended for backward-compatible updates, does
- not require downtime.
-
-To upgrade the cluster, do the following:
-
-#. Schedule a downtime or plan for the instance-by-instance upgrade.
-
-#. Upload a new application package (archive) to all servers.
-
-Next, execute the chosen scenario:
-
-* **Cluster shutdown**:
-
- #. Stop all instances on all servers.
- #. Deploy the new package (archive) on every server.
-
-* **Instance by instance**. Do the following in every replica set in succession:
-
- #. Stop a replica on any server.
- #. Deploy the new package (archive) in place of the old replica.
- #. Promote the new replica to a master (see
- :ref:`Switching the replica set's master `
- section in the Tarantool manual).
- #. Redeploy the old master and the rest of the instances in the replica set.
- #. Be prepared to resolve possible logic conflicts.
+.. TODO: rewrite for TCM instead of Cartridge
\ No newline at end of file
diff --git a/doc/enterprise/audit.rst b/doc/enterprise/audit.rst
index 15d26a16da..927d5f7266 100644
--- a/doc/enterprise/audit.rst
+++ b/doc/enterprise/audit.rst
@@ -43,9 +43,9 @@ is not sufficiently secure.
Closed HTTP ports
-----------------
-Tarantool accepts HTTP connections on a specific port, configured with
-``http_port: `` value
-(see :ref:`configuring Cartridge instances `).
+.. TODO: update for new EE config
+
+Tarantool accepts HTTP connections on a specific port.
It must be only available on the same host for nginx to connect to it.
Check that the configured HTTP port is closed
@@ -103,8 +103,6 @@ Authorization in the web UI
---------------------------
Using the web interface must require logging in with a username and password.
-See more details in the documentation on
-:ref:`configuring web interface authorization `.
Running under the tarantool user
--------------------------------
@@ -128,8 +126,7 @@ This should be checked on each Tarantool instance.
The :ref:`snapshot_count ` value
determines the number of kept snapshots.
Configuration values are primarily set in the configuration files
-but :doc:`can be overridden `
-with environment variables and command-line arguments.
+but can be overridden with environment variables and command-line arguments.
So, it's best to check both the values in the configuration files and the actual values
using the console:
diff --git a/doc/enterprise/cartridge-auth.rst b/doc/enterprise/cartridge-auth.rst
index 3cd3a7b796..858fbd5753 100644
--- a/doc/enterprise/cartridge-auth.rst
+++ b/doc/enterprise/cartridge-auth.rst
@@ -3,141 +3,4 @@
LDAP authorization
==================
-This chapter describes how to manage the access roles for LDAP users authorizing in your Cartridge application.
-
-Setting up this feature is twofold:
-
-* :ref:`enabling the feature ` for your application
-* :ref:`specifying configuration parameters `.
-
-.. note::
-
- For information on setting up the authorization of external users in your application, refer to :ref:`ldap_auth`.
-
-.. _enterprise-cartridge-auth-enable:
-
-Enabling LDAP authorization
----------------------------
-
-First, you should enable LDAP authorization function in your :ref:`application development project `:
-
-* set up dependency to the ``cartridge-auth-extension`` module that is available in the :ref:`Enterprise Edition's package `.
-* update the configuration in the application initialization file.
-
-.. note::
-
- If you don't have a development project yet, refer to :doc:`dev` on how to create it.
-
-1. In your development project, find a ``.rockspec`` file and specify the following dependency:
-
- .. code-block:: bash
-
- dependencies = {
- 'cartridge-auth-extension'
- }
-
-2. In an initialization Lua file of your project, specify the ``cartridge-auth-extension`` :ref:`cluster role ` in the :ref:`Cartridge configuration `.
- The role enables storing authorized users and validating the :ref:`LDAP configuration `.
-
- .. code-block:: lua
-
- cartridge.cfg({
- roles = {
- 'cartridge-auth-extension',
- },
- auth_backend_name = 'cartridge-auth-extension',
- })
-
-3. Deploy and start your application. For details, refer to :doc:`dev`.
-
-.. _enterprise-cartridge-auth-config:
-
-Configuring LDAP authorization
-------------------------------
-
-After starting your application, you need to configure LDAP authorization. It can be done via the GUI administrative console.
-
-1. In a web browser, open the GUI administrative console of your application.
-
-2. If you have the application instances already configured, proceed to the next step. Otherwise, refer to :ref:`cartridge-deployment` on how to configure the cluster.
-
-3. In the GUI administrative console, navigate to the **Code** tab. Create the following YAML configuration files and specify the necessary parameters.
- Below is the example of configuration and the :ref:`description of parameters `.
-
-.. note::
-
- If you set the authorization mode as ``local`` in the ``auth_extension.yml`` file, you don't need to define LDAP configuration parameters in the ``ldap.yml`` file.
-
-
-* ``auth_extension.yml``
-
- .. code-block:: yaml
-
- method: local+ldap
-
-* ``ldap.yml``
-
- .. code-block:: yaml
-
- - domain: 'test.glauth.com'
- organizational_units: ['all_staff']
- hosts:
- - localhost:3893
- use_tls: false
- use_active_directory: false
- search_timeout: 2
- roles:
- - role: 'admin'
- domain_groups:
- - 'cn=superusers,ou=groups,dc=glauth,dc=com'
- - 'cn=users,ou=groups,dc=glauth,dc=com'
- options:
- LDAP_OPT_DEBUG_LEVEL: 10
-
-* ``auth.yml``
-
- .. code-block:: yaml
-
- enabled: true
-
-.. _enterprise-cartridge-auth-config-params:
-
-**Configuration parameters:**
-
-* ``method`` -- authorization mode. Possible values:
-
- * ``local`` -- only local users can be authorized in the application. "Local" refers to users created in the application.
- * ``ldap`` -- only LDAP users can be authorized.
- * ``local+ldap`` -- both local and LDAP users can be authorized.
-
-* ``domain`` -- domain name that is used in the domain login ID (``user_name@domain``).
-
-* ``organizational_units`` -- names of the organizational units or user groups.
-
-* ``hosts`` -- LDAP server addresses.
-
-* ``use_tls`` -- boolean flag that defines TLS usage. Defaults to ``false``.
-
-* ``use_active_directory`` -- boolean flag that defines usage of the Active Directory. Defaults to ``false``.
- If set to ``true``, use the login ID in the email format (``user_name@domain``).
- The ID should be equal to the ``userPrincipalName`` Active Directory attribute value because the latter is used in the Active Directory filter.
-
-* ``search_timeout`` -- LDAP server response timeout. Defaults to 2 seconds.
-
-* ``roles`` -- user roles assigned to a user depending on the LDAP groups the user belongs to:
-
- * ``role`` -- user role;
- * ``domain_groups`` -- LDAP groups where ``cn`` -- common name; ``ou`` -- organization unit name; ``dc`` -- domain component.
-
-* ``options`` -- the OpenLDAP library options. Supported options:
-
- * LDAP_OPT_X_TLS_REQUIRE_CERT
- * LDAP_OPT_PROTOCOL_VERSION
- * LDAP_OPT_DEBUG_LEVEL
- * LDAP_OPT_X_TLS_CACERTFILE
- * LDAP_OPT_X_TLS_CACERTDIR.
-
- For description of the options, refer to the `OpenLDAP documentation `__.
-
-* ``enabled`` -- boolean flag. If set to ``true``, enables mandatory authentication mode in the application web interface.
-
+.. TODO: rewrite for TCM and rename the file
\ No newline at end of file
diff --git a/doc/enterprise/dev.rst b/doc/enterprise/dev.rst
index b1c5a08220..f16cc41598 100644
--- a/doc/enterprise/dev.rst
+++ b/doc/enterprise/dev.rst
@@ -1,448 +1,6 @@
.. _enterprise-app-development:
-===============================================================================
Developer's guide
-===============================================================================
+=================
-To develop an application, use the Tarantool Cartridge framework that is
-:ref:`installed ` as part of Tarantool Enterprise Edition.
-
-Here is a summary of the commands you need:
-
-#. Create a cluster-aware application from the template:
-
- .. code-block:: bash
-
- $ tt create cartridge --name -d /path/to
-
-#. Develop your application:
-
- .. code-block:: bash
-
- $ cd /path/to/
- $ ...
-
-#. Package your application:
-
- .. code-block:: bash
-
- $ tt pack [rpm|tgz] /path/to/
-
-#. Deploy your application:
-
- * For ``rpm`` package:
-
- 1. Upload the package to all servers dedicated to Tarantool.
- 2. Install the package:
-
- .. code-block:: bash
-
- $ yum install -.rpm
-
- 3. Launch the application.
-
- .. code-block:: bash
-
- $ systemctl start
-
- * For ``tgz`` archive:
-
- 1. Upload the archive to all servers dedicated to Tarantool.
- 2. Unpack the archive:
-
- .. code-block:: bash
-
- $ tar -xzvf