diff --git a/site/en/guide/tpu.ipynb b/site/en/guide/tpu.ipynb
index d4376d1455..49eee544be 100644
--- a/site/en/guide/tpu.ipynb
+++ b/site/en/guide/tpu.ipynb
@@ -416,7 +416,7 @@
    "source": [
     "### Train the model using a custom training loop\n",
     "\n",
-    "You can also create and train your model using `tf.function` and `tf.distribute` APIs directly. You can use the `Strategy.experimental_distribute_datasets_from_function` API to distribute the `tf.data.Dataset` given a dataset function. Note that in the example below the batch size passed into the `Dataset` is the per-replica batch size instead of the global batch size. To learn more, check out the [Custom training with `tf.distribute.Strategy`](../tutorials/distribute/custom_training.ipynb) tutorial.\n"
+    "You can also create and train your model using `tf.function` and `tf.distribute` APIs directly. You can use the `Strategy.distribute_datasets_from_function` API to distribute the `tf.data.Dataset` given a dataset function. Note that in the example below the batch size passed into the `Dataset` is the per-replica batch size instead of the global batch size. To learn more, check out the [Custom training with `tf.distribute.Strategy`](../tutorials/distribute/custom_training.ipynb) tutorial.\n"
    ]
   },
   {
@@ -449,7 +449,7 @@
     "# on each TPU worker.\n",
     "per_replica_batch_size = batch_size // strategy.num_replicas_in_sync\n",
     "\n",
-    "train_dataset = strategy.experimental_distribute_datasets_from_function(\n",
+    "train_dataset = strategy.distribute_datasets_from_function(\n",
     "    lambda _: get_dataset(per_replica_batch_size, is_training=True))\n",
     "\n",
     "@tf.function\n",
@@ -600,4 +600,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file
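
For context, below is a minimal, self-contained sketch of the pattern the updated cells describe: building a `TPUStrategy`, distributing a dataset with the renamed `Strategy.distribute_datasets_from_function`, and running a custom training step. The TPU resolver setup, the synthetic `dataset_fn` (a stand-in for the notebook's `get_dataset` helper), and the tiny Keras model are illustrative assumptions, not code taken from the notebook.

```python
import tensorflow as tf

# Assumed TPU setup; mirrors the pattern used earlier in the guide.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)

global_batch_size = 64
# The dataset function batches with the *per-replica* batch size, because
# each replica builds its own input pipeline from the function.
per_replica_batch_size = global_batch_size // strategy.num_replicas_in_sync

def dataset_fn(input_context):
  # Hypothetical stand-in for the notebook's `get_dataset` helper:
  # random features/labels instead of the real input pipeline.
  features = tf.random.normal([1024, 10])
  labels = tf.random.uniform([1024], maxval=2, dtype=tf.int32)
  ds = tf.data.Dataset.from_tensor_slices((features, labels))
  return ds.repeat().batch(per_replica_batch_size, drop_remainder=True)

# The renamed API: Strategy.distribute_datasets_from_function.
train_dataset = strategy.distribute_datasets_from_function(dataset_fn)

with strategy.scope():
  model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
  optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
  loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
      from_logits=True, reduction=tf.keras.losses.Reduction.NONE)

@tf.function
def train_step(iterator):
  def step_fn(inputs):
    features, labels = inputs
    with tf.GradientTape() as tape:
      logits = model(features, training=True)
      # Average the per-example loss over the *global* batch size so the
      # gradients summed across replicas are scaled correctly.
      loss = tf.nn.compute_average_loss(
          loss_fn(labels, logits), global_batch_size=global_batch_size)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
  strategy.run(step_fn, args=(next(iterator),))

train_iterator = iter(train_dataset)
train_step(train_iterator)
```

The detail the notebook text calls out is that `dataset_fn` batches with the per-replica batch size; the per-example loss is then averaged over the global batch size so the replica-summed gradients come out with the right scale.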