diff --git a/AI-and-Analytics/Features-and-Functionality/IntelPython_GPU_numba-dpex_Genetic_Algorithm/IntelPython_GPU_numba-dpex_Genetic_Algorithm.ipynb b/AI-and-Analytics/Features-and-Functionality/IntelPython_GPU_numba-dpex_Genetic_Algorithm/IntelPython_GPU_numba-dpex_Genetic_Algorithm.ipynb
index cc1689202a..3d4ca24b98 100644
--- a/AI-and-Analytics/Features-and-Functionality/IntelPython_GPU_numba-dpex_Genetic_Algorithm/IntelPython_GPU_numba-dpex_Genetic_Algorithm.ipynb
+++ b/AI-and-Analytics/Features-and-Functionality/IntelPython_GPU_numba-dpex_Genetic_Algorithm/IntelPython_GPU_numba-dpex_Genetic_Algorithm.ipynb
@@ -348,7 +348,7 @@
"\n",
"The only par that differs form the standard implementation is the evaluation function.\n",
"\n",
- "The most important part is to specify the global index of the computation. This is the current index of the computed chromosomes. This serves as a loop function across all chromosomes."
+ "The most important part is to specify the index of the computation. This is the current index of the computed chromosomes. This serves as a loop function across all chromosomes."
]
},
{
@@ -365,10 +365,11 @@
"outputs": [],
"source": [
"import numba_dpex\n",
+ "from numba_dpex import kernel_api\n",
"\n",
"@numba_dpex.kernel\n",
- "def eval_genomes_sycl_kernel(chromosomes, fitnesses, chrom_length):\n",
- " pos = numba_dpex.get_global_id(0)\n",
+ "def eval_genomes_sycl_kernel(item: kernel_api.Item, chromosomes, fitnesses, chrom_length):\n",
+ " pos = item.get_id(0)\n",
" num_loops = 3000\n",
" for i in range(num_loops):\n",
" fitnesses[pos] += chromosomes[pos*chrom_length + 1]\n",
@@ -409,8 +410,9 @@
" chromosomes_flat = chromosomes.flatten()\n",
" chromosomes_flat_dpctl = dpnp.asarray(chromosomes_flat, device=\"gpu\")\n",
" fitnesses_dpctl = dpnp.asarray(fitnesses, device=\"gpu\")\n",
- "\n",
- " eval_genomes_sycl_kernel[numba_dpex.Range(pop_size)](chromosomes_flat_dpctl, fitnesses_dpctl, chrom_size)\n",
+ " \n",
+ " exec_range = kernel_api.Range(pop_size)\n",
+ " numba_dpex.call_kernel(eval_genomes_sycl_kernel, exec_range, chromosomes_flat_dpctl, fitnesses_dpctl, chrom_size)\n",
" fitnesses = dpnp.asnumpy(fitnesses_dpctl)\n",
" chromosomes = next_generation(chromosomes, fitnesses)\n",
" fitnesses = np.zeros(pop_size, dtype=np.float32)\n",
@@ -544,7 +546,7 @@
"\n",
"The evaluate created generation we are calculating the full distance of the given path (chromosome). In this example, the lower the fitness value is, the better the chromosome. That's different from the general GA that we implemented.\n",
"\n",
- "As in this example we are also using numba-dpex, we are using a global index like before."
+ "As in this example we are also using numba-dpex, we are using an index like before."
]
},
{
@@ -554,8 +556,8 @@
"outputs": [],
"source": [
"@numba_dpex.kernel\n",
- "def eval_genomes_plain_TSP_SYCL(chromosomes, fitnesses, distances, pop_length):\n",
- " pos = numba_dpex.get_global_id(0)\n",
+ "def eval_genomes_plain_TSP_SYCL(item: kernel_api.Item, chromosomes, fitnesses, distances, pop_length):\n",
+ " pos = item.get_id(1)\n",
" for j in range(pop_length-1):\n",
" fitnesses[pos] += distances[int(chromosomes[pos, j]), int(chromosomes[pos, j+1])]\n"
]
@@ -708,7 +710,8 @@
" chromosomes_flat_dpctl = dpnp.asarray(chromosomes, device=\"gpu\")\n",
" fitnesses_dpctl = dpnp.asarray(fitnesses.copy(), device=\"gpu\")\n",
"\n",
- " eval_genomes_plain_TSP_SYCL[numba_dpex.Range(pop_size)](chromosomes_flat_dpctl, fitnesses_dpctl, distances_dpctl, pop_size)\n",
+ " exec_range = kernel_api.Range(pop_size)\n",
+ " numba_dpex.call_kernel(eval_genomes_plain_TSP_SYCL, exec_range, chromosomes_flat_dpctl, fitnesses_dpctl, distances_dpctl, pop_size)\n",
" fitnesses = dpnp.asnumpy(fitnesses_dpctl)\n",
" chromosomes = next_generation_TSP(chromosomes, fitnesses)\n",
" fitnesses = np.zeros(pop_size, dtype=np.float32)\n",
diff --git a/AI-and-Analytics/Features-and-Functionality/IntelPython_GPU_numba-dpex_Genetic_Algorithm/IntelPython_GPU_numba-dpex_Genetic_Algorithm.py b/AI-and-Analytics/Features-and-Functionality/IntelPython_GPU_numba-dpex_Genetic_Algorithm/IntelPython_GPU_numba-dpex_Genetic_Algorithm.py
index 072e8e97b6..6d0174fc8d 100644
--- a/AI-and-Analytics/Features-and-Functionality/IntelPython_GPU_numba-dpex_Genetic_Algorithm/IntelPython_GPU_numba-dpex_Genetic_Algorithm.py
+++ b/AI-and-Analytics/Features-and-Functionality/IntelPython_GPU_numba-dpex_Genetic_Algorithm/IntelPython_GPU_numba-dpex_Genetic_Algorithm.py
@@ -260,16 +260,17 @@ def next_generation(chromosomes, fitnesses):
#
# The only part that differs from the standard implementation is the evaluation function.
#
-# The most important part is to specify the global index of the computation. This is the current index of the computed chromosomes. This serves as a loop function across all chromosomes.
+# The most important part is to specify the index of the computation. This is the index of the chromosome currently being evaluated, so the kernel runs once per chromosome and takes the place of a loop over all chromosomes.
# In[ ]:
import numba_dpex
+from numba_dpex import kernel_api
@numba_dpex.kernel
-def eval_genomes_sycl_kernel(chromosomes, fitnesses, chrom_length):
- pos = numba_dpex.get_global_id(0)
+def eval_genomes_sycl_kernel(item: kernel_api.Item, chromosomes, fitnesses, chrom_length):
+ pos = item.get_id(0)
num_loops = 3000
for i in range(num_loops):
fitnesses[pos] += chromosomes[pos*chrom_length + 1]
@@ -300,7 +301,8 @@ def eval_genomes_sycl_kernel(chromosomes, fitnesses, chrom_length):
chromosomes_flat_dpctl = dpnp.asarray(chromosomes_flat, device="gpu")
fitnesses_dpctl = dpnp.asarray(fitnesses, device="gpu")
- eval_genomes_sycl_kernel[numba_dpex.Range(pop_size)](chromosomes_flat_dpctl, fitnesses_dpctl, chrom_size)
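+ # Launch one work item per chromosome with numba_dpex.call_kernel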
+ exec_range = kernel_api.Range(pop_size)
+ numba_dpex.call_kernel(eval_genomes_sycl_kernel, exec_range, chromosomes_flat_dpctl, fitnesses_dpctl, chrom_size)
fitnesses = dpnp.asnumpy(fitnesses_dpctl)
chromosomes = next_generation(chromosomes, fitnesses)
fitnesses = np.zeros(pop_size, dtype=np.float32)
@@ -398,14 +400,14 @@ def eval_genomes_sycl_kernel(chromosomes, fitnesses, chrom_length):
#
# To evaluate the created generation, we calculate the full distance of the given path (chromosome). In this example, the lower the fitness value, the better the chromosome. That's different from the general GA that we implemented.
#
-# As in this example we are also using numba-dpex, we are using a global index like before.
+# As this example also uses numba-dpex, we retrieve the work-item index as before.
# In[ ]:
@numba_dpex.kernel
-def eval_genomes_plain_TSP_SYCL(chromosomes, fitnesses, distances, pop_length):
- pos = numba_dpex.get_global_id(0)
+def eval_genomes_plain_TSP_SYCL(item: kernel_api.Item, chromosomes, fitnesses, distances, pop_length):
+ pos = item.get_id(0)
for j in range(pop_length-1):
fitnesses[pos] += distances[int(chromosomes[pos, j]), int(chromosomes[pos, j+1])]
@@ -526,7 +528,8 @@ def next_generation_TSP(chromosomes, fitnesses):
chromosomes_flat_dpctl = dpnp.asarray(chromosomes, device="gpu")
fitnesses_dpctl = dpnp.asarray(fitnesses.copy(), device="gpu")
- eval_genomes_plain_TSP_SYCL[numba_dpex.Range(pop_size)](chromosomes_flat_dpctl, fitnesses_dpctl, distances_dpctl, pop_size)
+ exec_range = kernel_api.Range(pop_size)
+ numba_dpex.call_kernel(eval_genomes_plain_TSP_SYCL, exec_range, chromosomes_flat_dpctl, fitnesses_dpctl, distances_dpctl, pop_size)
fitnesses = dpnp.asnumpy(fitnesses_dpctl)
chromosomes = next_generation_TSP(chromosomes, fitnesses)
fitnesses = np.zeros(pop_size, dtype=np.float32)
diff --git a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/README.md b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/README.md
index 9b484f0213..8efdf0b337 100644
--- a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/README.md
+++ b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/README.md
@@ -2,7 +2,7 @@
The `Enable Auto-Mixed Precision for Transfer Learning with TensorFlow*` sample guides you through the process of enabling auto-mixed precision to use low-precision datatypes, like bfloat16, for transfer learning with TensorFlow* (TF).
-The sample demonstrates the end-to-end pipeline tasks typically performed in a deep learning use-case: training (and retraining), inference optimization, and serving the model with TensorFlow Serving.
+The sample demonstrates the tasks typically performed in a deep learning use case: training (and retraining) and inference optimization. The sample also includes tips and boilerplate code for serving the model with TensorFlow Serving.
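+
+As a minimal illustration (not part of the sample's scripts), one way to opt into bfloat16 execution in TensorFlow is the Keras mixed-precision API; the sample itself selects precision through its benchmark scripts:
+
+```
+import tensorflow as tf
+
+# Compute-heavy ops run in bfloat16 while variables stay in float32
+tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
+```
+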
| Area | Description
|:--- |:---
@@ -37,10 +37,6 @@ You will need to download and install the following toolkits, tools, and compone
Install using PIP: `$pip install notebook`.
Alternatively, see [*Installing Jupyter*](https://jupyter.org/install) for detailed installation instructions.
-- **TensorFlow Serving**
-
- See *TensorFlow Serving* [*Installation*](https://www.tensorflow.org/tfx/serving/setup) for detailed installation options.
-
- **Other dependencies**
Install using PIP and the `requirements.txt` file supplied with the sample: `$pip install -r requirements.txt --no-deps`.
The `requirements.txt` file contains the necessary dependencies to run the Notebook.
@@ -112,6 +108,70 @@ You will see diagrams comparing performance and analysis. This includes performa
For performance analysis, you will see histograms showing different TensorFlow* operations in the analyzed pre-trained model pb file.
+## Serve the model with TensorFlow Serving
+
+### Installation
+See *TensorFlow Serving* [*Installation*](https://www.tensorflow.org/tfx/serving/setup) for detailed installation options.
+
+### Example Code
+
+Create a copy of the optimized model in a well-defined directory hierarchy with a version number "1".
+
+```
+!mkdir serving
+!cp -r models/my_optimized_model serving/1
+```
+
+```
+os.environ["MODEL_DIR"] = os.getcwd() + "/serving"
+```
+
+This is where we start running TensorFlow Serving and load our model. After it loads, we can start making inference requests using REST. There are some important parameters:
+- **rest_api_port**: The port that you'll use for REST requests.
+- **model_name**: You'll use this in the URL of REST requests. It can be anything.
+- **model_base_path**: This is the path to the directory where you've saved your model.
+
+```
+%%bash --bg
+nohup tensorflow_model_server --rest_api_port=8501 --model_name=rn50 --model_base_path=${MODEL_DIR} > server.log 2>&1
+```
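+
+After giving the server a moment to start, you can confirm that it loaded the model by inspecting the log:
+
+```
+!tail server.log
+```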
+
+#### Prepare the testing data for prediction
+
+```
+for image_batch, labels_batch in val_ds:
+ print(image_batch.shape)
+ print(labels_batch.shape)
+ break
+test_data, test_labels = image_batch.numpy(), labels_batch.numpy()
+```
+
+#### Make REST requests
+
+Now let's create the JSON object for a batch of three inference requests, then send a predict request as a POST to the server's REST endpoint, passing it those three examples.
+
+```
+import json
+import requests
+import numpy as np
+import matplotlib.pyplot as plt
+
+def show(idx, title):
+ plt.figure()
+ plt.imshow(test_data[idx])
+ plt.axis('off')
+ plt.title('\n\n{}'.format(title), fontdict={'size': 16})
+
+data = json.dumps({"signature_name": "serving_default", "instances": test_data[0:3].tolist()})
+print('Data: {} ... {}'.format(data[:50], data[len(data)-52:]))
+
+headers = {"content-type": "application/json"}
+json_response = requests.post('http://localhost:8501/v1/models/rn50:predict', data=data, headers=headers)
+predictions = json.loads(json_response.text)['predictions']
+
+for i in range(0,3):
+ show(i, 'The model thought this was a {} (class {}), and it was actually a {} (class {})'.format(
+ class_names[np.argmax(predictions[i])], np.argmax(predictions[i]), class_names[test_labels[i]], test_labels[i]))
+```
+
## License
Code samples are licensed under the MIT license. See
diff --git a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/enabling_automixed_precision_for_transfer_learning_with_tensorflow.ipynb b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/enabling_automixed_precision_for_transfer_learning_with_tensorflow.ipynb
index d923a2db0d..86cad227d9 100644
--- a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/enabling_automixed_precision_for_transfer_learning_with_tensorflow.ipynb
+++ b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/enabling_automixed_precision_for_transfer_learning_with_tensorflow.ipynb
@@ -32,7 +32,6 @@
"import tensorflow_hub as hub\n",
"from datetime import datetime\n",
"import requests\n",
- "from copy import deepcopy\n",
"print(\"We are using Tensorflow version: \", tf.__version__)"
]
},
@@ -443,19 +442,33 @@
"id": "8a03faef",
"metadata": {},
"source": [
- "Let's measure the performance of the model we just saved using the `tf_benchmark.py` script that runs inference on dummy data."
+ "Let's measure the performance of the model we just saved using the `tf_benchmark.py` script that runs inference on dummy data.\n",
+ "\n",
+ "_Note: We only use the auto-mixed precision policy if the underlying system is the 4th Gen Intel® Xeon® scalable processor (codenamed Sapphire Rapids)_"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "db6aa4b4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if arch == 'SPR':\n",
+ " PRECISION = \"bfloat16\"\n",
+ "else:\n",
+ " PRECISION = \"float32\"\n",
+ "print(\"Precision for inference: \", PRECISION)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fd855747",
- "metadata": {
- "scrolled": false
- },
+ "metadata": {},
"outputs": [],
"source": [
- "run scripts/tf_benchmark.py --model_path models/my_saved_model --num_warmup 5 --num_iter 50 --precision float32 --batch_size 32 --disable_optimize"
+ "!python scripts/tf_benchmark.py --model_path models/my_saved_model --num_warmup 5 --num_iter 50 --precision PRECISION --batch_size 32 --disable_optimize"
]
},
{
@@ -486,7 +499,7 @@
"metadata": {},
"outputs": [],
"source": [
- "run scripts/freeze_optimize_v2.py --input_saved_model_dir=models/my_saved_model --output_saved_model_dir=models/my_optimized_model"
+ "!python scripts/freeze_optimize_v2.py --input_saved_model_dir=models/my_saved_model --output_saved_model_dir=models/my_optimized_model"
]
},
{
@@ -501,12 +514,10 @@
"cell_type": "code",
"execution_count": null,
"id": "480dddda",
- "metadata": {
- "scrolled": false
- },
+ "metadata": {},
"outputs": [],
"source": [
- "run scripts/tf_benchmark.py --model_path models/my_optimized_model --num_warmup 5 --num_iter 50 --precision float32 --batch_size 32"
+ "!python scripts/tf_benchmark.py --model_path models/my_optimized_model --num_warmup 5 --num_iter 50 --precision PRECISION --batch_size 32"
]
},
{
@@ -526,174 +537,24 @@
"metadata": {},
"outputs": [],
"source": [
- "run scripts/plot.py"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "8157a5ec",
- "metadata": {},
- "source": [
- "### TensorFlow Serving\n",
- "\n",
- "In this section, we will initialize and run TensorFlow Serving natively to serve our retrained model."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "6a00c32d",
- "metadata": {},
- "outputs": [],
- "source": [
- "!mkdir serving\n",
- "!cp -r models/my_optimized_model serving/1"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "a45b5438",
- "metadata": {},
- "outputs": [],
- "source": [
- "os.environ[\"MODEL_DIR\"] = os.getcwd() + \"/serving\""
- ]
- },
- {
- "cell_type": "markdown",
- "id": "edcd77c4",
- "metadata": {},
- "source": [
- "This is where we start running TensorFlow Serving and load our model. After it loads we can start making inference requests using REST. There are some important parameters:\n",
- "- **rest_api_port**: The port that you'll use for REST requests.\n",
- "- **model_name**: You'll use this in the URL of REST requests. It can be anything.\n",
- "- **model_base_path**: This is the path to the directory where you've saved your model."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "34aee14f",
- "metadata": {},
- "outputs": [],
- "source": [
- "%%bash --bg\n",
- "nohup tensorflow_model_server --rest_api_port=8501 --model_name=rn50 --model_base_path=${MODEL_DIR} > server.log 2>&1"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "e486894a",
- "metadata": {},
- "outputs": [],
- "source": [
- "!tail server.log"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "7dc7606d",
- "metadata": {},
- "source": [
- "**Prepare the testing data for prediction**"
+ "!python scripts/plot.py"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "c9dfa9d8",
+ "id": "7c1bd119-ffc1-4761-a614-c2ffd83e6b4c",
"metadata": {},
"outputs": [],
- "source": [
- "for image_batch, labels_batch in val_ds:\n",
- " print(image_batch.shape)\n",
- " print(labels_batch.shape)\n",
- " break\n",
- "test_data, test_labels = image_batch.numpy(), labels_batch.numpy()"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "5d4e5f62",
- "metadata": {},
- "source": [
- "First, let's take a look at a random example from our test data."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "e2761dcf",
- "metadata": {},
- "outputs": [],
- "source": [
- "import matplotlib.pyplot as plt\n",
- "\n",
- "def show(idx, title):\n",
- " plt.figure()\n",
- " plt.imshow(test_data[idx])\n",
- " plt.axis('off')\n",
- " plt.title('\\n\\n{}'.format(title), fontdict={'size': 16})\n",
- "\n",
- "import random\n",
- "rando = random.randint(0,test_data.shape[0]-1)\n",
- "show(rando, 'An Example Image:')"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "3b362658",
- "metadata": {},
- "source": [
- "#### Make a request to your model in TensorFlow Serving\n",
- "\n",
- "Now let's create the JSON object for a batch of three inference requests, and see how well our model recognizes things:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "831bf2d1",
- "metadata": {
- "scrolled": true
- },
- "outputs": [],
- "source": [
- "import json\n",
- "data = json.dumps({\"signature_name\": \"serving_default\", \"instances\": test_data[0:3].tolist()})\n",
- "print('Data: {} ... {}'.format(data[:50], data[len(data)-52:]))"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "427f3c8b",
- "metadata": {},
- "source": [
- "#### Make REST requests\n",
- "\n",
- "We'll send a predict request as a POST to our server's REST endpoint, and pass it three examples."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "3d7f5e5e",
- "metadata": {},
- "outputs": [],
- "source": [
- "headers = {\"content-type\": \"application/json\"}\n",
- "json_response = requests.post('http://localhost:8501/v1/models/rn50:predict', data=data, headers=headers)\n",
- "predictions = json.loads(json_response.text)['predictions']\n",
- "\n",
- "for i in range(0,3):\n",
- " show(i, 'The model thought this was a {} (class {}), and it was actually a {} (class {})'.format(\n",
- " class_names[np.argmax(predictions[i])], np.argmax(predictions[i]), class_names[test_labels[i]], test_labels[i]))"
- ]
+ "source": []
}
],
"metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
"language_info": {
"codemirror_mode": {
"name": "ipython",
@@ -704,7 +565,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.8.12"
+ "version": "3.10.12"
}
},
"nbformat": 4,
diff --git a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/requirements.txt b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/requirements.txt
index d1622109b9..d14af0407d 100644
--- a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/requirements.txt
+++ b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/requirements.txt
@@ -1,4 +1,5 @@
-notebook
+neural_compressor==2.4.1
Pillow
-tensorflow_hub
+py-cpuinfo
requests
+tensorflow_hub==0.16.0
diff --git a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/scripts/tf_benchmark.py b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/scripts/tf_benchmark.py
index dc00b7e695..7a5ca313c5 100644
--- a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/scripts/tf_benchmark.py
+++ b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_Enabling_Auto_Mixed_Precision_for_TransferLearning/scripts/tf_benchmark.py
@@ -190,7 +190,7 @@ def run_benchmark(model_details, args, find_graph_def):
throughput = 1.0 / avg_time * args.batch_size
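+ # avg_time is seconds per batch, so batch_size / avg_time gives images per second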
print('Batch size = %d' % args.batch_size)
print("Latency: {:.3f} ms".format(latency))
- print("Throughput: {:.2f} fps".format(throughput))
+ print("Throughput: {:.2f} images per sec".format(throughput))
# Logging to a file
log_file = open("log.txt", "a")
diff --git a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/TextGenerationModelTraining.ipynb b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/TextGenerationModelTraining.ipynb
index 3be5ebdfd1..2b4b9015e7 100644
--- a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/TextGenerationModelTraining.ipynb
+++ b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/TextGenerationModelTraining.ipynb
@@ -47,6 +47,7 @@
"source": [
"import string\n",
"import requests\n",
+ "import os\n",
"\n",
"response = requests.get('https://www.gutenberg.org/cache/epub/1497/pg1497.txt')\n",
"data = response.text.split('\\n')\n",
@@ -253,6 +254,11 @@
"metadata": {},
"outputs": [],
"source": [
+ "num_epochs = 200\n",
+ "# For custom epochs numbers from the environment\n",
+ "if \"ITEX_NUM_EPOCHS\" in os.environ:\n",
+ " num_epochs = int(os.environ.get('ITEX_NUM_EPOCHS'))\n",
+ "\n",
"neuron_coef = 4\n",
"itex_lstm_model = Sequential()\n",
"itex_lstm_model.add(Embedding(input_dim=vocab_size, output_dim=seq_length, input_length=seq_length))\n",
@@ -262,7 +268,7 @@
"itex_lstm_model.add(Dense(units=vocab_size, activation='softmax'))\n",
"itex_lstm_model.summary()\n",
"itex_lstm_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
- "itex_lstm_model.fit(x,y, batch_size=256, epochs=200)"
+ "itex_lstm_model.fit(x,y, batch_size=256, epochs=num_epochs)"
]
},
{
@@ -296,6 +302,11 @@
"seq_length = x.shape[1]\n",
"vocab_size = y.shape[1]\n",
"\n",
+ "num_epochs = 20\n",
+ "# For custom epochs numbers\n",
+ "if \"KERAS_NUM_EPOCHS\" in os.environ:\n",
+ " num_epochs = int(os.environ.get('KERAS_NUM_EPOCHS'))\n",
+ "\n",
"neuron_coef = 1\n",
"keras_lstm_model = Sequential()\n",
"keras_lstm_model.add(Embedding(input_dim=vocab_size, output_dim=seq_length, input_length=seq_length))\n",
@@ -305,7 +316,7 @@
"keras_lstm_model.add(Dense(units=vocab_size, activation='softmax'))\n",
"keras_lstm_model.summary()\n",
"keras_lstm_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
- "keras_lstm_model.fit(x,y, batch_size=256, epochs=20)"
+ "keras_lstm_model.fit(x,y, batch_size=256, epochs=num_epochs)"
]
},
{
diff --git a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/TextGenerationModelTraining.py b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/TextGenerationModelTraining.py
index 2812615dbf..d4e4f5450d 100644
--- a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/TextGenerationModelTraining.py
+++ b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/TextGenerationModelTraining.py
@@ -28,6 +28,7 @@
import string
import requests
+import os
response = requests.get('https://www.gutenberg.org/cache/epub/1497/pg1497.txt')
data = response.text.split('\n')
@@ -168,6 +169,11 @@ def tokenize_prepare_dataset(lines):
# In[ ]:
+num_epochs = 200
+# Allow overriding the number of training epochs from the environment
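+# e.g. ITEX_NUM_EPOCHS=5 python TextGenerationModelTraining.py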
+if "ITEX_NUM_EPOCHS" in os.environ:
+ num_epochs = int(os.environ.get('ITEX_NUM_EPOCHS'))
+
neuron_coef = 4
itex_lstm_model = Sequential()
itex_lstm_model.add(Embedding(input_dim=vocab_size, output_dim=seq_length, input_length=seq_length))
@@ -177,7 +183,7 @@ def tokenize_prepare_dataset(lines):
itex_lstm_model.add(Dense(units=vocab_size, activation='softmax'))
itex_lstm_model.summary()
itex_lstm_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
-itex_lstm_model.fit(x,y, batch_size=256, epochs=200)
+itex_lstm_model.fit(x,y, batch_size=256, epochs=num_epochs)
# ## Compared to LSTM from Keras
@@ -201,6 +207,11 @@ def tokenize_prepare_dataset(lines):
seq_length = x.shape[1]
vocab_size = y.shape[1]
+num_epochs = 20
+# Allow overriding the number of training epochs from the environment
+if "KERAS_NUM_EPOCHS" in os.environ:
+ num_epochs = int(os.environ.get('KERAS_NUM_EPOCHS'))
+
neuron_coef = 1
keras_lstm_model = Sequential()
keras_lstm_model.add(Embedding(input_dim=vocab_size, output_dim=seq_length, input_length=seq_length))
@@ -210,7 +221,7 @@ def tokenize_prepare_dataset(lines):
keras_lstm_model.add(Dense(units=vocab_size, activation='softmax'))
keras_lstm_model.summary()
keras_lstm_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
-keras_lstm_model.fit(x,y, batch_size=256, epochs=20)
+keras_lstm_model.fit(x,y, batch_size=256, epochs=num_epochs)
# ## Generating text based on the input
@@ -276,4 +287,3 @@ def generate_text_seq(model, tokenizer, text_seq_length, seed_text, generated_wo
print("[CODE_SAMPLE_COMPLETED_SUCCESFULLY]")
-
diff --git a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/sample.json b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/sample.json
index bc6b668f72..d4f4ccbb9c 100644
--- a/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/sample.json
+++ b/AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/sample.json
@@ -23,7 +23,7 @@
"id": "inc_text_generation_lstm_py",
"steps": [
"export ITEX_ENABLE_NEXTPLUGGABLE_DEVICE=0",
- "python TextGenerationModelTraining.py"
+ "ITEX_NUM_EPOCHS=5 KERAS_NUM_EPOCHS=5 python TextGenerationModelTraining.py"
]
}
]
diff --git a/AI-and-Analytics/Features-and-Functionality/IntelTransformers_Quantization/sample.json b/AI-and-Analytics/Features-and-Functionality/IntelTransformers_Quantization/sample.json
index f35192d8cc..b74790f5ad 100644
--- a/AI-and-Analytics/Features-and-Functionality/IntelTransformers_Quantization/sample.json
+++ b/AI-and-Analytics/Features-and-Functionality/IntelTransformers_Quantization/sample.json
@@ -13,16 +13,12 @@
"linux": [{
"env": [
"source /intel/oneapi/intelpython/bin/activate",
- "apt-get install -y numactl",
"conda activate pytorch",
- "pip install -r requirements.txt",
- "pip install jupyter ipykernel",
- "python -m ipykernel install --user --name=pytorch"
+ "pip install -r requirements.txt"
],
"id": "itrex_quantize_transformer_models",
"steps": [
- "jupyter nbconvert --ExecutePreprocessor.enabled=True --ExecutePreprocessor.kernel_name=pytorch --to notebook quantize_transformer_models_with_itrex.ipynb",
- "numactl -m 0 -C all python quantize_transformer_models_with_itrex.py --model_name \"Intel/neural-chat-7b-v3-1\" --quantize \"int8\" --max_new_tokens 50"
+ "python quantize_transformer_models_with_itrex.py --model_name \"Intel/neural-chat-7b-v3-1\" --quantize \"int4\" --max_new_tokens 50"
]
}]
},
diff --git a/AI-and-Analytics/Getting-Started-Samples/IntelPython_XGBoost_GettingStarted/README.md b/AI-and-Analytics/Getting-Started-Samples/IntelPython_XGBoost_GettingStarted/README.md
index b238332e67..0925788384 100755
--- a/AI-and-Analytics/Getting-Started-Samples/IntelPython_XGBoost_GettingStarted/README.md
+++ b/AI-and-Analytics/Getting-Started-Samples/IntelPython_XGBoost_GettingStarted/README.md
@@ -1,6 +1,6 @@
-# Intel® Python XGBoost* Getting Started Sample
+# Intel® Optimization for XGBoost* Getting Started Sample
-The `Intel® Python XGBoost* Getting Started` sample demonstrates how to set up and train an XGBoost model on datasets for prediction.
+The `Intel® Optimization for XGBoost* Getting Started` sample demonstrates how to set up and train an XGBoost model on datasets for prediction.
| Area | Description
| :--- | :---
@@ -12,7 +12,7 @@ The `Intel® Python XGBoost* Getting Started` sample demonstrates how to set up
XGBoost* is a widely used gradient boosting library in the classical ML area. Designed for flexibility, performance, and portability, XGBoost* includes optimized distributed gradient boosting frameworks and implements Machine Learning algorithms underneath. Starting with XGBoost version 0.9, Intel has been upstreaming optimizations through the `hist` histogram tree-building method. Starting with version 1.3.3, Intel has also been upstreaming inference optimizations.
-In this code sample, you will learn how to use Intel optimizations for XGBoost published as part of Intel® AI Tools. The sample also illustrates how to set up and train an XGBoost* model on datasets for prediction. It also demonstrates how to use software products that can be found in the [Intel® AI Tools](https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit.html).
+In this code sample, you will learn how to use Intel optimizations for XGBoost published as part of AI Tools. The sample also illustrates how to set up and train an XGBoost* model on datasets for prediction. It also demonstrates how to use software products that can be found in the [AI Tools](https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit.html).
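+
+As a minimal sketch of that optimized code path (the synthetic dataset and parameters are illustrative, not taken from the sample), the `hist` method is selected when constructing the estimator:
+
+```
+import xgboost as xgb
+from sklearn.datasets import make_regression
+
+# Placeholder regression data for illustration
+X, y = make_regression(n_samples=1000, n_features=10, random_state=42)
+
+# 'hist' selects the histogram tree-building method that carries Intel's optimizations
+model = xgb.XGBRegressor(tree_method="hist")
+model.fit(X, y)
+```
+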
## Prerequisites
@@ -22,96 +22,126 @@ In this code sample, you will learn how to use Intel optimizations for XGBoost p
| Hardware | Intel Atom® Processors <br> Intel® Core™ Processor Family <br> Intel® Xeon® Processor Family <br> Intel® Xeon® Scalable processor family
| Software | XGBoost*
+> **Note**: AI and Analytics samples are validated on AI Tools Offline Installer. For the full list of validated platforms refer to [Platform Validation](https://github.com/oneapi-src/oneAPI-samples/tree/master?tab=readme-ov-file#platform-validation).
+
## Key Implementation Details
-- This Getting Started sample code is implemented for CPU using the Python language. The example assumes you have XGboost installed inside a conda environment, similar to what is delivered with the installation of the Intel® Distribution for Python* as part of the [Intel® AI Tools](https://software.intel.com/en-us/oneapi/ai-kit).
+- This Getting Started sample code is implemented for CPU using the Python language. The example assumes you have XGBoost installed inside a conda environment, similar to what is delivered with the installation of the Intel® Distribution for Python*.
-- XGBoost* is ready for use once you finish the Intel® AI Tools installation and have run the post installation script.
+- XGBoost* is ready for use once you finish the AI Tools installation and have run the post installation script.
## Environment Setup
You will need to download and install the following toolkits, tools, and components to use the sample.
-**1. Get Intel® AI Tools**
+**1. Get AI Tools**
Required AI Tools: Intel® Optimization for XGBoost*
If you have not already, select and install these Tools via [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html). AI and Analytics samples are validated on AI Tools Offline Installer. It is recommended to select Offline Installer option in AI Tools Selector.
+>**Note**: If Docker option is chosen in AI Tools Selector, refer to [Working with Preset Containers](https://github.com/intel/ai-containers/tree/main/preset) to learn how to run the docker and samples.
-**2. Install dependencies**
+**2. (Offline Installer) Activate the AI Tools bundle base environment**
+
+If the default path is used during the installation of AI Tools:
```
-pip install -r requirements.txt
+source $HOME/intel/oneapi/intelpython/bin/activate
+```
+If a non-default path is used:
+```
+source <custom_path>/bin/activate
+```
+
+ **3. (Offline Installer) Activate relevant Conda environment**
+```
+conda activate base
+```
+
+**4. Clone the GitHub repository**
+```
+git clone https://github.com/oneapi-src/oneAPI-samples.git
+cd oneAPI-samples/AI-and-Analytics/Getting-Started-Samples/IntelPython_XGBoost_GettingStarted
+```
+
+**5. Install dependencies**
+>**Note**: Before running the following commands, make sure your Conda/Python environment with AI Tools installed is activated
+
```
-**Install Jupyter Notebook** by running `pip install notebook`. Alternatively, see [Installing Jupyter](https://jupyter.org/install) for detailed installation instructions.
+pip install -r requirements.txt
+pip install notebook
+```
+For Jupyter Notebook, refer to [Installing Jupyter](https://jupyter.org/install) for detailed installation instructions.
## Run the Sample
>**Note**: Before running the sample, make sure [Environment Setup](https://github.com/oneapi-src/oneAPI-samples/tree/master/AI-and-Analytics/Getting-Started-Samples/IntelPython_XGBoost_GettingStarted#environment-setup) is completed.
+
Go to the section which corresponds to the installation method chosen in [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html) to see relevant instructions:
* [AI Tools Offline Installer (Validated)](#ai-tools-offline-installer-validated)
* [Conda/PIP](#condapip)
* [Docker](#docker)
### AI Tools Offline Installer (Validated)
-1. If you have not already done so, activate the AI Tools bundle base environment. If you used the default location to install AI Tools, open a terminal and type the following
-```
-source $HOME/intel/oneapi/intelpython/bin/activate
-```
-If you used a separate location, open a terminal and type the following
+
+**1. Register Conda kernel to Jupyter Notebook kernel**
+
+If the default path is used during the installation of AI Tools:
```
-source <custom_path>/bin/activate
+$HOME/intel/oneapi/intelpython/envs/base/bin/python -m ipykernel install --user --name=base
```
-2. Activate the Conda environment:
+If a non-default path is used:
```
-conda activate xgboost
-```
-3. Clone the GitHub repository:
-```
-git clone https://github.com/oneapi-src/oneAPI-samples.git
-cd oneAPI-samples/AI-and-Analytics/Getting-Started-Samples/IntelPython_XGBoost_GettingStarted
+<custom_path>/bin/python -m ipykernel install --user --name=base
```
-
-4. Launch Jupyter Notebook:
-> **Note**: You might need to register Conda kernel to Jupyter Notebook kernel,
-feel free to check [the instruction](https://github.com/IntelAI/models/tree/master/docs/notebooks/perf_analysis#option-1-conda-environment-creation)
+**2. Launch Jupyter Notebook**
+
```
jupyter notebook --ip=0.0.0.0
```
-
-5. Follow the instructions to open the URL with the token in your browser.
-6. Select the Notebook:
+**3. Follow the instructions to open the URL with the token in your browser**
+
+**4. Select the Notebook**
+
```
IntelPython_XGBoost_GettingStarted.ipynb
```
-
-7. Change the kernel to xgboost
-
-8. Run every cell in the Notebook in sequence.
+**5. Change the kernel to `base`**
+
+**6. Run every cell in the Notebook in sequence**
### Conda/PIP
-> **Note**: Make sure your Conda/Python environment with AI Tools installed is activated
-1. Clone the GitHub repository:
-```
-git clone https://github.com/oneapi-src/oneAPI-samples.git
-cd oneapi-samples/AI-and-Analytics/Getting-Started-Samples/IntelPython_XGBoost_GettingStarted
+> **Note**: Before running the instructions below, make sure your Conda/Python environment with AI Tools installed is activated
+
+**1. Register Conda/Python kernel to Jupyter Notebook kernel**
+
+For Conda:
+```
+<CONDA_PATH_TO_ENV>/bin/python -m ipykernel install --user --name=<your-env-name>
+```
+To know <CONDA_PATH_TO_ENV>, run `conda env list` and find your Conda environment path.
+
+For PIP:
+```
+python -m ipykernel install --user --name=<your-env-name>
```
-2. Launch Jupyter Notebook:
-> **Note**: You might need to register Conda kernel to Jupyter Notebook kernel,
-feel free to check [the instruction](https://github.com/IntelAI/models/tree/master/docs/notebooks/perf_analysis#option-1-conda-environment-creation)
+**2. Launch Jupyter Notebook**
+
```
jupyter notebook --ip=0.0.0.0
```
-
-4. Follow the instructions to open the URL with the token in your browser.
-5. Select the Notebook:
+**3. Follow the instructions to open the URL with the token in your browser**
+
+**4. Select the Notebook**
+
```
IntelPython_XGBoost_GettingStarted.ipynb
```
-6. Run every cell in the Notebook in sequence.
+**5. Change the kernel to `<your-env-name>`**
+
+
+**6. Run every cell in the Notebook in sequence**
### Docker
AI Tools Docker images already have Get Started samples pre-installed. Refer to [Working with Preset Containers](https://github.com/intel/ai-containers/tree/main/preset) to learn how to run the docker and samples.
-
-
## Example Output
>**Note**: Your numbers might be different.
@@ -129,8 +159,10 @@ RMSE: 11.113036205909719
## License
Code samples are licensed under the MIT license. See
-[License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt) for details.
+[License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt)
+for details.
-Third party program Licenses can be found here: [third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt).
+Third party program Licenses can be found here:
+[third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt)
*Other names and brands may be claimed as the property of others. [Trademarks](https://www.intel.com/content/www/us/en/legal/trademarks.html)
diff --git a/AI-and-Analytics/Getting-Started-Samples/IntelPython_daal4py_GettingStarted/README.md b/AI-and-Analytics/Getting-Started-Samples/IntelPython_daal4py_GettingStarted/README.md
index 39233bf8c6..59a7968fba 100755
--- a/AI-and-Analytics/Getting-Started-Samples/IntelPython_daal4py_GettingStarted/README.md
+++ b/AI-and-Analytics/Getting-Started-Samples/IntelPython_daal4py_GettingStarted/README.md
@@ -24,107 +24,132 @@ In this sample, you will run a batch Linear Regression model with oneDAL daal4py
| Software | Intel® oneAPI Data Analytics Library (oneDAL)
> **Note**: AI and Analytics samples are validated on AI Tools Offline Installer. For the full list of validated platforms refer to [Platform Validation](https://github.com/oneapi-src/oneAPI-samples/tree/master?tab=readme-ov-file#platform-validation).
-### For Local Development Environments
+## Key Implementation Details
+
+- This Getting Started sample code is implemented for CPUs using the Python language. The example assumes you have daal4py and scikit-learn installed inside a conda environment, similar to what is delivered with the installation of the Intel® Distribution for Python*.
+
+- The Intel® oneAPI Data Analytics Library (oneDAL) is ready for use once you finish the AI Tools installation and have run the post installation script.
+
+## Environment Setup
You will need to download and install the following toolkits, tools, and components to use the sample.
-- **Intel® AI Tools**
+**1. Get AI Tools**
- You can get the AI Tools from [Intel® oneAPI Toolkits](https://www.intel.com/content/www/us/en/developer/tools/oneapi/toolkits.html#analytics-kit).
See [*Get Started with the Intel® AI Tools for Linux*](https://www.intel.com/content/www/us/en/develop/documentation/get-started-with-ai-linux) for AI Kit installation information and post-installation steps and scripts.
+Required AI Tools: daal4py (Select Intel® Extension for Scikit-learn* on [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html) to install)
-## Key Implementation Details
-- This get started sample code is implemented for CPUs using the Python language. The example assumes you have daal4py and scikit-learn installed inside a conda environment, similar to what is delivered with the installation of the Intel® Distribution for Python* as part of the Intel® AI Analytics Toolkit.
+If you have not already, select and install these Tools via [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html). AI and Analytics samples are validated on AI Tools Offline Installer. It is recommended to select Offline Installer option in AI Tools Selector.
-- The Intel® oneAPI Data Analytics Library (oneDAL) is ready for use once you finish the Intel® AI Analytics Toolkit installation and have run the post installation script.
+>**Note**: If Docker option is chosen in AI Tools Selector, refer to [Working with Preset Containers](https://github.com/intel/ai-containers/tree/main/preset) to learn how to run the docker and samples.
-## Environment Setup
+**2. (Offline Installer) Activate the AI Tools bundle base environment**
-You will need to download and install the following toolkits, tools, and components to use the sample.
+If the default path is used during the installation of AI Tools:
+```
+source $HOME/intel/oneapi/intelpython/bin/activate
+```
+If a non-default path is used:
+```
+source <custom_path>/bin/activate
+```
+
+**3. (Offline Installer) Activate relevant Conda environment**
+
+```
+conda activate base
+```
-**1. Get Intel® AI Tools**
+**4. Clone the GitHub repository**
+
+
+```
+git clone https://github.com/oneapi-src/oneAPI-samples.git
+cd oneAPI-samples/AI-and-Analytics/Getting-Started-Samples/IntelPython_daal4py_GettingStarted
+```
-Required AI Tools: Intel® Optimization for XGBoost*
-
If you have not already, select and install these Tools via [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html). AI and Analytics samples are validated on AI Tools Offline Installer. It is recommended to select Offline Installer option in AI Tools Selector.
+**5. Install dependencies**
+
+>**Note**: Before running the following commands, make sure your Conda/Python environment with AI Tools installed is activated
-**2. Install dependencies**
```
pip install -r requirements.txt
-```
-**Install Jupyter Notebook** by running `pip install notebook`. Alternatively, see [Installing Jupyter](https://jupyter.org/install) for detailed installation instructions.
+pip install notebook
+```
+For Jupyter Notebook, refer to [Installing Jupyter](https://jupyter.org/install) for detailed installation instructions.
## Run the Sample
->**Note**: Before running the sample, make sure [Environment Setup](https://github.com/oneapi-src/oneAPI-samples/tree/master/AI-and-Analytics/Getting-Started-Samples/IntelPython_daal4py_GettingStarted#environment-setup) is completed.
+>**Note**: Before running the sample, make sure [Environment Setup](https://github.com/oneapi-src/oneAPI-samples/tree/master/AI-and-Analytics/Getting-Started-Samples/IntelPython_daal4py_GettingStarted#environment-setup) is completed.
+
Go to the section which corresponds to the installation method chosen in [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html) to see relevant instructions:
* [AI Tools Offline Installer (Validated)](#ai-tools-offline-installer-validated)
* [Conda/PIP](#condapip)
* [Docker](#docker)
### AI Tools Offline Installer (Validated)
-1. If you have not already done so, activate the AI Tools bundle base environment. If you used the default location to install AI Tools, open a terminal and type the following
-```
-source $HOME/intel/oneapi/intelpython/bin/activate
+
+**1. Register Conda kernel to Jupyter Notebook kernel**
+
+If the default path is used during the installation of AI Tools:
```
-If you used a separate location, open a terminal and type the following
+$HOME/intel/oneapi/intelpython/envs/base/bin/python -m ipykernel install --user --name=base
```
-source <custom_path>/bin/activate
+If a non-default path is used:
```
-2. Activate the Conda environment:
-
+<custom_path>/bin/python -m ipykernel install --user --name=base
```
-conda activate xgboost
-```
-3. Clone the GitHub repository:
-```
-git clone https://github.com/oneapi-src/oneAPI-samples.git
-cd oneAPI-samples/AI-and-Analytics/Getting-Started-Samples/IntelPython_daal4py_GettingStarted
-```
-4. Launch Jupyter Notebook:
-> **Note**: You might need to register Conda kernel to Jupyter Notebook kernel,
-feel free to check [the instruction](https://github.com/IntelAI/models/tree/master/docs/notebooks/perf_analysis#option-1-conda-environment-creation)
+
+**2. Launch Jupyter Notebook**
+
```
jupyter notebook --ip=0.0.0.0
```
-
-5. Follow the instructions to open the URL with the token in your browser.
-6. Select the Notebook:
+**3. Follow the instructions to open the URL with the token in your browser**
+
+**4. Select the Notebook**
```
IntelPython_daal4py_GettingStarted.ipynb
```
-
-7. Change the kernel to xgboost
+**5. Change the kernel to `base`**
-8. Run every cell in the Notebook in sequence.
+**6. Run every cell in the Notebook in sequence**
### Conda/PIP
-> **Note**: Make sure your Conda/Python environment with AI Tools installed is activated
-1. Clone the GitHub repository:
-```
-git clone https://github.com/oneapi-src/oneAPI-samples.git
-cd oneapi-samples/AI-and-Analytics/Getting-Started-Samples/IntelPython_daal4py_GettingStarted
+> **Note**: Before running the instructions below, make sure your Conda/Python environment with AI Tools installed is activated
+
+**1. Register Conda/Python kernel to Jupyter Notebook kernel**
+
+For Conda:
+```
+<CONDA_PATH_TO_ENV>/bin/python -m ipykernel install --user --name=<your-env-name>
```
+To know <CONDA_PATH_TO_ENV>, run `conda env list` and find your Conda environment path.
-2. Launch Jupyter Notebook:
-> **Note**: You might need to register Conda kernel to Jupyter Notebook kernel,
-feel free to check [the instruction](https://github.com/IntelAI/models/tree/master/docs/notebooks/perf_analysis#option-1-conda-environment-creation)
+For PIP:
+```
+python -m ipykernel install --user --name=<your-env-name>
+```
+**2. Launch Jupyter Notebook**
+
```
jupyter notebook --ip=0.0.0.0
```
-
-4. Follow the instructions to open the URL with the token in your browser.
-5. Select the Notebook:
+**3. Follow the instructions to open the URL with the token in your browser**
+
+**4. Select the Notebook**
```
-IntelPython_daal4py_GettingStarted.ipynb.ipynb
+IntelPython_daal4py_GettingStarted.ipynb
```
+**5. Change the kernel to `<your-env-name>`**
+
-6. Run every cell in the Notebook in sequence.
+**6. Run every cell in the Notebook in sequence**
### Docker
AI Tools Docker images already have Get Started samples pre-installed. Refer to [Working with Preset Containers](https://github.com/intel/ai-containers/tree/main/preset) to learn how to run the docker and samples.
-
-
## Example Output
```
@@ -161,8 +186,10 @@ Here is one of our loaded model's features:
## License
Code samples are licensed under the MIT license. See
-[License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt) for details.
+[License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt)
+for details.
-Third-party program licenses can be found here: [third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt).
+Third party program Licenses can be found here:
+[third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt)
*Other names and brands may be claimed as the property of others. [Trademarks](https://www.intel.com/content/www/us/en/legal/trademarks.html)
diff --git a/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_SKLearn_GettingStarted/readme.md b/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_SKLearn_GettingStarted/readme.md
index 38839d565b..4073e490c4 100644
--- a/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_SKLearn_GettingStarted/readme.md
+++ b/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_SKLearn_GettingStarted/readme.md
@@ -1,6 +1,6 @@
-# Intel® Python Scikit-learn Extension Getting Started Sample
+# Intel® Extension for Scikit-learn Getting Started Sample
-The `Intel® Python Scikit-learn Extension Getting Started` sample demonstrates how to use a support vector machine classifier from Intel® Extension for Scikit-learn* for digit recognition problem. All other machine learning algorithms available with Scikit-learn can be used in the similar way. Intel® Extension for Scikit-learn* speeds up scikit-learn applications. The acceleration is achieved through the use of the Intel® oneAPI Data Analytics Library (oneDAL) [Intel oneAPI Data Analytics Library](https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/onedal.html), which comes with [Intel® AI Analytics Toolkit (AI Kit)](https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit.html).
+The `Intel® Extension for Scikit-learn Getting Started` sample demonstrates how to use a support vector machine classifier from Intel® Extension for Scikit-learn* for a digit recognition problem. All other machine learning algorithms available with Scikit-learn can be used in a similar way. Intel® Extension for Scikit-learn* speeds up scikit-learn applications. The acceleration is achieved through the use of the [Intel® oneAPI Data Analytics Library (oneDAL)](https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/onedal.html), which comes with [AI Tools](https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit.html).
| Area | Description
@@ -13,83 +13,137 @@ The `Intel® Python Scikit-learn Extension Getting Started` sample demonstrates
## Purpose
In this sample, you will run a support vector classifier model from sklearn with oneDAL Daal4py library memory objects. You will also learn how to train a model and save the information to a file. Intel® Extension for Scikit-learn* depends on Intel® Daal4py. Daal4py is a simplified API to oneDAL that allows for fast usage of the framework, suited for data scientists and machine learning users. It is built to provide an abstraction over oneDAL for direct use or for integration into one's own framework.
+
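+A minimal sketch of this pattern (the digits dataset and SVC mirror the sample's description; the exact parameters are illustrative):
+
+```
+from sklearnex import patch_sklearn
+patch_sklearn()  # re-route supported scikit-learn algorithms to oneDAL
+
+# Import scikit-learn estimators only after patching
+from sklearn import datasets, svm
+
+digits = datasets.load_digits()
+clf = svm.SVC(gamma=0.001)
+clf.fit(digits.data[:-10], digits.target[:-10])
+print(clf.predict(digits.data[-10:]))
+```
+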
## Prerequisites
| Optimized for | Description
| :--- | :---
| OS | Ubuntu* 20.04 (or newer)
| Hardware | Intel Atom® processors <br> Intel® Core™ processor family <br> Intel® Xeon® processor family <br> Intel® Xeon® Scalable processor family
-| Software | Intel® AI Analytics Toolkit (AI Kit) <br> Intel® oneAPI Data Analytics Library (oneDAL) <br> Joblib <br> NumPy <br> Matplotlib
-
-You can refer to the oneAPI [product page](https://software.intel.com/en-us/oneapi) for toolkit installation and the Toolkit *[Get Started with the Intel® AI Analytics Toolkit for Linux*
-](https://software.intel.com/en-us/get-started-with-intel-oneapi-linux-get-started-with-the-intel-ai-analytics-toolkit)* for post-installation steps and scripts.
-
-oneDAL is ready for use once you finish the AI Kit installation and have run the post installation script.
+| Software | AI Tools <br> Intel® oneAPI Data Analytics Library (oneDAL) <br> Joblib <br> NumPy <br> Matplotlib
+> **Note**: AI and Analytics samples are validated on AI Tools Offline Installer. For the full list of validated platforms refer to [Platform Validation](https://github.com/oneapi-src/oneAPI-samples/tree/master?tab=readme-ov-file#platform-validation).
## Key Implementation Details
-This Getting Started sample code is implemented for CPU using the Python language. The example assumes you have Intel® Extension for Scikit-learn* installed inside a conda environment, similar to what is delivered with the installation of the Intel® Distribution for Python* as part of the [Intel® AI Analytics Toolkit](https://software.intel.com/en-us/oneapi/ai-kit). Intel® Extension for Scikit-learn* is available as a part of Intel® AI Analytics Toolkit (AI kit).
+This Getting Started sample code is implemented for CPU using the Python language. The example assumes you have Intel® Extension for Scikit-learn* installed inside a conda environment, similar to what is delivered with the installation of the Intel® Distribution for Python*. Intel® Extension for Scikit-learn* is available as a part of AI Tools.
## Environment Setup
-1. If you have not already done so, activate the AI Tools bundle base environment. If you used the default location to install AI Tools, open a terminal and type the following
+You will need to download and install the following toolkits, tools, and components to use the sample.
+
+**1. Get AI Tools**
+
+Required AI Tools: Intel® Extension for Scikit-learn*
+
+If you have not already, select and install these Tools via [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html). AI and Analytics samples are validated on AI Tools Offline Installer. It is recommended to select Offline Installer option in AI Tools Selector.
+
+>**Note**: If Docker option is chosen in AI Tools Selector, refer to [Working with Preset Containers](https://github.com/intel/ai-containers/tree/main/preset) to learn how to run the docker and samples.
+
+**2. (Offline Installer) Activate the AI Tools bundle base environment**
+
+If the default path is used during the installation of AI Tools:
```
source $HOME/intel/oneapi/intelpython/bin/activate
```
-If you used a separate location, open a terminal and type the following
+If a non-default path is used:
```
source <custom_path>/bin/activate
```
-2. Activate Conda with Root Access
+**3. (Offline Installer) Activate relevant Conda environment**
+
+```
+conda activate base
+```
+
+**4. Clone the GitHub repository**
-Intel Python environment will be active by default. However, if you activated another environment, you can return with the following command.
+```
+git clone https://github.com/oneapi-src/oneAPI-samples.git
+cd oneAPI-samples/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_SKLearn_GettingStarted
```
-source activate base
-pip install -r requirements.txt
+
+**5. Install dependencies**
+>**Note**: Before running the following commands, make sure your Conda/Python environment with AI Tools installed is activated
+
```
+pip install -r requirements.txt
+pip install notebook
+```
+For Jupyter Notebook, refer to [Installing Jupyter](https://jupyter.org/install) for detailed installation instructions.
+
+## Run the Sample
+>**Note**: Before running the sample, make sure [Environment Setup](https://github.com/oneapi-src/oneAPI-samples/tree/master/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_SKLearn_GettingStarted#environment-setup) is completed.
-2a. Activate Conda without Root Access (Optional)
+Go to the section which corresponds to the installation method chosen in [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html) to see relevant instructions:
+* [AI Tools Offline Installer (Validated)](#ai-tools-offline-installer-validated)
+* [Conda/PIP](#condapip)
+* [Docker](#docker)
-By default, the Intel® AI Analytics Toolkit is installed in the inteloneapi folder, which requires root privileges to manage it. If you would like to bypass using root access to manage your conda environment, then you can clone and activate your desired conda environment using the following commands.
+### AI Tools Offline Installer (Validated)
+
+**1. Register Conda kernel to Jupyter Notebook kernel**
+
+If the default path is used during the installation of AI Tools:
```
-conda create --name usr_intelpython --clone base
-source activate usr_intelpython
+$HOME/intel/oneapi/intelpython/envs/base/bin/python -m ipykernel install --user --name=base
```
-3. Clone the GitHub repository
+If a non-default path is used:
```
-git clone https://github.com/oneapi-src/oneAPI-samples.git
-cd oneAPI-samples/AI-and-Analytics/Getting-Started-Samples
+<custom_path>/bin/python -m ipykernel install --user --name=base
```
-### Install Jupyter Notebook
+**2. Launch Jupyter Notebook**
+
+```
+jupyter notebook --ip=0.0.0.0
+```
+**3. Follow the instructions to open the URL with the token in your browser**
-1. Change to the sample directory.
-2. Install Jupyter Notebook with the proper kernel.
- ```
- conda install jupyter nb_conda_kernels
- ```
+**4. Select the Notebook**
+```
+Intel_Extension_For_SKLearn_GettingStarted.ipynb
+```
+**5. Change the kernel to `base`**
+
+
+**6. Run every cell in the Notebook in sequence**
-#### View in Jupyter Notebook
+### Conda/PIP
+> **Note**: Before running the instructions below, make sure your Conda/Python environment with AI Tools installed is activated.
->**Note**: This distributed execution cannot be launched from Jupyter Notebook, but you can still view inside the notebook to follow the included write-up and description.
+**1. Register Conda/Python kernel to Jupyter Notebook kernel**
+
+For Conda:
+```
+<CONDA_PATH_TO_ENV>/bin/python -m ipykernel install --user --name=<your-env-name>
+```
+To know `<CONDA_PATH_TO_ENV>`, run `conda env list` and find your Conda environment path.
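+
+For example, `conda env list` prints output similar to the following (the paths shown are illustrative):
+```
+# conda environments:
+#
+base                  *  /home/user/intel/oneapi/intelpython
+myenv                    /home/user/intel/oneapi/intelpython/envs/myenv
+```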
-1. Change to the sample directory.
-2. Launch Jupyter Notebook.
- ```
- jupyter notebook
- ```
-3. Locate and select the Notebook.
- ```
- Intel_Extension_For_SKLearn_GettingStarted.ipynb
- ```
-4. Click the **Run** button to move through the cells in sequence.
+For PIP:
+```
+python -m ipykernel install --user --name=<your-env-name>
+```
+**2. Launch Jupyter Notebook**
+
+```
+jupyter notebook --ip=0.0.0.0
+```
+**3. Follow the instructions to open the URL with the token in your browser**
+
+**4. Select the Notebook**
+
+```
+Intel_Extension_For_SKLearn_GettingStarted.ipynb
+```
+**5. Change the kernel to `<your-env-name>`**
+
-#### Troubleshooting
+**6. Run every cell in the Notebook in sequence**
-If you receive an error message, troubleshoot the problem using the **Diagnostics Utility for Intel® oneAPI Toolkits**. The diagnostic utility provides configuration and system checks to help find missing dependencies, permissions errors, and other issues. See the *[Diagnostics Utility for Intel® oneAPI Toolkits User Guide](https://www.intel.com/content/www/us/en/develop/documentation/diagnostic-utility-user-guide/top.html)* for more information on using the utility.
+### Docker
+AI Tools Docker images already have Get Started samples pre-installed. Refer to [Working with Preset Containers](https://github.com/intel/ai-containers/tree/main/preset) to learn how to run the Docker container and the samples.
## Example Output
@@ -113,8 +167,10 @@ Model accuracy on test data: 0.9833333333333333
## License
Code samples are licensed under the MIT license. See
-[License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt) for details.
+[License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt)
+for details.
-Third party program Licenses can be found here: [third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt).
+Third party program Licenses can be found here:
+[third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt)
*Other names and brands may be claimed as the property of others. [Trademarks](https://www.intel.com/content/www/us/en/legal/trademarks.html)
diff --git a/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_TensorFlow_GettingStarted/ResNet50_Inference.ipynb b/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_TensorFlow_GettingStarted/ResNet50_Inference.ipynb
index be456ccf94..73c4ff980d 100755
--- a/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_TensorFlow_GettingStarted/ResNet50_Inference.ipynb
+++ b/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_TensorFlow_GettingStarted/ResNet50_Inference.ipynb
@@ -96,7 +96,13 @@
}
],
"source": [
- "run ../../version_check.py"
+ "import sys\n",
+ "sys.path.append('../../')\n",
+ "\n",
+ "import version_check\n",
+ "\n",
+ "arch = version_check.arch_checker().arch\n",
+ "print(\"Arch: \", arch)"
]
},
{
@@ -123,8 +129,8 @@
"source": [
"%%writefile run.sh\n",
"#!/bin/bash\n",
- "source $ONEAPI_INSTALL/setvars.sh --force > /dev/null 2>&1\n",
- "source activate user-tensorflow-gpu\n",
+ "source /intel/oneapi/intelpython/bin/activate\n",
+ "conda activate tensorflow-gpu\n",
"echo \"########## Executing the run\"\n",
"DNNL_VERBOSE=1 python infer_resnet50.py > infer_rn50_gpu.csv\n",
"echo \"########## Done with the run\""
@@ -148,7 +154,7 @@
"metadata": {},
"outputs": [],
"source": [
- "! chmod 755 ../../q; chmod 755 run.sh;if [ -x \"$(command -v qsub)\" ]; then ./q run.sh; else ./run.sh; fi"
+ "!chmod 755 run.sh; ./run.sh;"
]
},
{
@@ -167,8 +173,8 @@
"source": [
"%%writefile run.sh\n",
"#!/bin/bash\n",
- "source $ONEAPI_INSTALL/setvars.sh --force > /dev/null 2>&1\n",
- "source activate user-tensorflow\n",
+ "source /intel/oneapi/intelpython/bin/activate\n",
+ "source activate tensorflow\n",
"echo \"########## Executing the run\"\n",
"DNNL_VERBOSE=1 python infer_resnet50.py > infer_rn50_cpu.csv\n",
"echo \"########## Done with the run\""
@@ -192,7 +198,7 @@
"metadata": {},
"outputs": [],
"source": [
- "! chmod 755 ../../q; chmod 755 run.sh;if [ -x \"$(command -v qsub)\" ]; then ./q run.sh; else ./run.sh; fi"
+ "!chmod 755 run.sh; ./run.sh;"
]
},
{
diff --git a/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_TensorFlow_GettingStarted/requirements.txt b/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_TensorFlow_GettingStarted/requirements.txt
deleted file mode 100644
index a0ecfa082e..0000000000
--- a/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_TensorFlow_GettingStarted/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-tensorflow_hub
-ipykernel
-matplotlib
diff --git a/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_TensorFlow_GettingStarted/sample.json b/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_TensorFlow_GettingStarted/sample.json
index 55522107c0..2832c86df0 100755
--- a/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_TensorFlow_GettingStarted/sample.json
+++ b/AI-and-Analytics/Getting-Started-Samples/Intel_Extension_For_TensorFlow_GettingStarted/sample.json
@@ -15,22 +15,20 @@
"env": [
"source /intel/oneapi/intelpython/bin/activate",
"conda activate tensorflow",
- "pip install -r requirements.txt --no-deps",
+ "pip install tensorflow_hub==0.16.0 matplotlib",
"pip install tensorflow==2.15.0.post1",
"pip install jupyter ipykernel",
"python -m ipykernel install --user --name=tensorflow",
"conda deactivate",
"conda activate tensorflow-gpu",
- "pip install -r requirements.txt --no-deps",
+ "pip install tensorflow_hub==0.16.0 matplotlib",
"pip install tensorflow==2.15.0.post1",
- "pip install jupyter ipykernel",
- "python -m ipykernel install --user --name=tensorflow-gpu",
"conda deactivate"
],
"id": "itex_sample_test",
"steps": [
"conda activate tensorflow",
- "jupyter nbconvert --to notebook --execute ResNet50_Inference.ipynb"
+ "jupyter nbconvert --ExecutePreprocessor.enabled=True --ExecutePreprocessor.kernel_name=tensorflow --to notebook ResNet50_Inference.ipynb"
]
}]
},
diff --git a/AI-and-Analytics/Getting-Started-Samples/Modin_GettingStarted/README.md b/AI-and-Analytics/Getting-Started-Samples/Modin_GettingStarted/README.md
index 50b7e716ca..f7e4255c0f 100644
--- a/AI-and-Analytics/Getting-Started-Samples/Modin_GettingStarted/README.md
+++ b/AI-and-Analytics/Getting-Started-Samples/Modin_GettingStarted/README.md
@@ -1,11 +1,11 @@
# Modin* Get Started Sample
-The `Modin* Getting Started` sample demonstrates how to use distributed Pandas using the Modin package.
+The `Modin Getting Started` sample demonstrates how to distribute Pandas operations using the Modin package.
| Area | Description
| :--- | :---
| Category | Getting Started
-| What you will learn | Basic Modin* programming model for Intel processors
+| What you will learn | Basic Modin programming model for Intel processors
| Time to complete | 5 to 8 minutes
## Purpose
@@ -20,92 +20,131 @@ In this sample, you will run Modin-accelerated Pandas functions and note the per
| :--- | :---
| OS | Ubuntu* 18.04 (or newer)
| Hardware | Intel® Atom® processors <br> Intel® Core™ processor family <br> Intel® Xeon® processor family <br> Intel® Xeon® Scalable Performance processor family
-| Software | Modin
+| Software | Modin
+> **Note**: AI and Analytics samples are validated on the AI Tools Offline Installer. For the full list of validated platforms, refer to [Platform Validation](https://github.com/oneapi-src/oneAPI-samples/tree/master?tab=readme-ov-file#platform-validation).
## Key Implementation Details
This get started sample code is implemented for CPU using the Python language. The example assumes you have Pandas and Modin installed inside a conda environment.
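+
+As a minimal sketch of the pattern this sample demonstrates (illustrative code, not the sample itself), Modin is imported as a drop-in replacement for Pandas:
+
+```
+import numpy as np
+import modin.pandas as pd  # drop-in replacement for `import pandas as pd`
+
+# the familiar Pandas API, executed in parallel by Modin
+df = pd.DataFrame(np.random.randint(0, 100, size=(1_000_000, 4)))
+print(df.mean())
+```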
## Environment Setup
+You will need to download and install the following toolkits, tools, and components to use the sample.
+
+**1. Get AI Tools**
+
+Required AI Tools: Modin
+
+If you have not already done so, select and install these tools via the [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html). AI and Analytics samples are validated on the AI Tools Offline Installer, so it is recommended to select the Offline Installer option in the AI Tools Selector.
+
+>**Note**: If the Docker option is chosen in the AI Tools Selector, refer to [Working with Preset Containers](https://github.com/intel/ai-containers/tree/main/preset) to learn how to run the Docker container and the samples.
+
+**2. (Offline Installer) Activate the AI Tools bundle base environment**
+
+If the default path is used during the installation of AI Tools:
+```
+source $HOME/intel/oneapi/intelpython/bin/activate
+```
+If a non-default path is used:
+```
+source <custom_path>/bin/activate
+```
+
+**3. (Offline Installer) Activate relevant Conda environment**
+
+```
+conda activate modin
+```
+
+**4. Clone the GitHub repository**
+
+```
+git clone https://github.com/oneapi-src/oneAPI-samples.git
+cd oneAPI-samples/AI-and-Analytics/Getting-Started-Samples/Modin_GettingStarted
+```
+
+**5. Install dependencies**
+
+>**Note**: Before running the following commands, make sure your Conda/Python environment with AI Tools installed is activated.
+
+```
+pip install -r requirements.txt
+pip install notebook
+```
+For Jupyter Notebook, refer to [Installing Jupyter](https://jupyter.org/install) for detailed installation instructions.
+
+## Run the Sample
+>**Note**: Before running the sample, make sure [Environment Setup](#environment-setup) is completed.
+
+Go to the section that corresponds to the installation method you chose in the [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html) to see the relevant instructions:
+* [AI Tools Offline Installer (Validated)](#ai-tools-offline-installer-validated)
+* [Conda/PIP](#condapip)
+* [Docker](#docker)
+
+
+### AI Tools Offline Installer (Validated)
+
+**1. Register Conda kernel to Jupyter Notebook kernel**
+
+If the default path is used during the installation of AI Tools:
+```
+$HOME/intel/oneapi/intelpython/envs/modin/bin/python -m ipykernel install --user --name=modin
+```
+If a non-default path is used:
+```
+<custom_path>/bin/python -m ipykernel install --user --name=modin
+```
+**2. Launch Jupyter Notebook**
+
+```
+jupyter notebook --ip=0.0.0.0
+```
+**3. Follow the instructions to open the URL with the token in your browser**
+
+**4. Select the Notebook**
+
+```
+Modin_GettingStarted.ipynb
+```
+**5. Change the kernel to `modin`**
+
+**6. Run every cell in the Notebook in sequence**
+
+### Conda/PIP
+> **Note**: Before running the instructions below, make sure your Conda/Python environment with AI Tools installed is activated.
+
+**1. Register Conda/Python kernel to Jupyter Notebook kernel**
+
+For Conda:
+```
+<CONDA_PATH_TO_ENV>/bin/python -m ipykernel install --user --name=<your-env-name>
+```
+To know `<CONDA_PATH_TO_ENV>`, run `conda env list` and find your Conda environment path.
+
+For PIP:
+```
+python -m ipykernel install --user --name=<your-env-name>
+```
+**2. Launch Jupyter Notebook**
+
+```
+jupyter notebook --ip=0.0.0.0
+```
+**3. Follow the instructions to open the URL with the token in your browser**
+
+**4. Select the Notebook**
+
+```
+Modin_GettingStarted.ipynb
+```
+**5. Change the kernel to `<your-env-name>`**
+
+
+**6. Run every cell in the Notebook in sequence**
+
+### Docker
+AI Tools Docker images already have Get Started samples pre-installed. Refer to [Working with Preset Containers](https://github.com/intel/ai-containers/tree/main/preset) to learn how to run the Docker container and the samples.
-1. Install Modin in a new conda environment.
-
- >**Note:** replace python=3.x with your own Python version
- ```
- conda create -n modin python=3.x -y
- conda activate modin
- conda install modin-all -c conda-forge -y
- ```
-
-2. Install Matplotlib.
- ```
- conda install -c conda-forge matplotlib -y
- ```
-
-3. Install Jupyter Notebook.
- ```
- conda install jupyter nb_conda_kernels -y
- ```
-
-4. Create a new kernel for Jupyter Notebook based on your activated conda environment. (This step is optional if you plan to open the Notebook on your local server.)
- ```
- conda install ipykernel
- python -m ipykernel install --user --name usr_modin
- ```
-## Run the `Modin* Get Started` Sample
-
-You can run the Jupyter notebook with the sample code on your local server or download the sample code from the notebook as a Python file and run it locally.
-
-### Run the Sample in Visual Studio Code* (Optional)
-
-You can use Visual Studio Code (VS Code) extensions to set your environment, create launch configurations, and browse and download samples.
-
-The basic steps to build and run a sample using VS Code include:
-
-1. Download a sample using the extension **Code Sample Browser for Intel® oneAPI Toolkits**.
-2. Configure the oneAPI environment with the extension **Environment Configurator for Intel(R) oneAPI Toolkits**.
-3. Open a Terminal in VS Code by clicking **Terminal** > **New Terminal**.
-4. Run the sample in the VS Code terminal using the instructions below.
-
-On Linux, you can debug your GPU application with GDB for Intel® oneAPI toolkits using the **Generate Launch Configurations** extension.
-
-To learn more about the extensions, see
-[Using Visual Studio Code with Intel® oneAPI Toolkits](https://software.intel.com/content/www/us/en/develop/documentation/using-vs-code-with-intel-oneapi/top.html).
-
-
-### In Jupyter Notebook
-
-1. Activate the conda environment.
- ```
- conda activate aikit-modin
- ```
-
-2. Start the Jupyter Notebook server.
- ```
- jupyter notebook
- ```
-
-3. Locate and open the Notebook.
- ```
- Modin_GettingStarted.ipynb
- ```
-
-4. Click the **Run** button to move through the cells in sequence.
-
-### Run the Python Script Locally
-
-1. Convert ``Modin_GettingStarted.ipynb`` to a Python file. There are two options.
-
- 1. Open the notebook and download the script as Python file: **File > Download as > Python (py)**.
-
- 2. Convert the notebook file to a Python script using commands similar to the following.
- ```
- jupyter nbconvert --to python Modin_GettingStarted.ipynb
- ```
-2. Run the Python script.
- ```
- ipython Modin_GettingStarted.py
- ```
### Expected Output
@@ -118,8 +157,10 @@ The expected cell output is shown in the `Modin_GettingStarted.ipynb` Notebook.
## License
Code samples are licensed under the MIT license. See
-[License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt) for details.
+[License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt)
+for details.
-Third party program Licenses can be found here: [third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt).
+Third party program Licenses can be found here:
+[third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt)
*Other names and brands may be claimed as the property of others. [Trademarks](https://www.intel.com/content/www/us/en/legal/trademarks.html)
diff --git a/AI-and-Analytics/Getting-Started-Samples/Modin_Vs_Pandas/README.md b/AI-and-Analytics/Getting-Started-Samples/Modin_Vs_Pandas/README.md
index 14f3f0f442..597328d84f 100644
--- a/AI-and-Analytics/Getting-Started-Samples/Modin_Vs_Pandas/README.md
+++ b/AI-and-Analytics/Getting-Started-Samples/Modin_Vs_Pandas/README.md
@@ -1,11 +1,11 @@
-# Modin* Vs. Pandas Performance Sample
+# Modin Vs. Pandas Performance Sample
-The `Modin* Vs. Pandas Performance` code illustrates how to use Modin* to replace the Pandas API. The sample compares the performance of Modin* and the performance of Pandas for specific dataframe operations.
+The `Modin Vs. Pandas Performance` code illustrates how to use Modin to replace the Pandas API. The sample compares the performance of Modin and Pandas for specific dataframe operations.
| Area | Description
|:--- |:---
| Category | Concepts and Functionality
-| What you will learn | How to accelerate the Pandas API using Modin*.
+| What you will learn | How to accelerate the Pandas API using Modin.
| Time to complete | Less than 10 minutes
## Purpose
@@ -19,77 +19,138 @@ You can run the sample locally or in Google Colaboratory (Colab).
|:--- |:---
| OS | Ubuntu* 20.04 (or newer)
| Hardware | Intel® Core™ Gen10 Processor <br> Intel® Xeon® Scalable Performance processors
-| Software | Modin*
+| Software | Intel® Distribution of Modin*
+
+> **Note**: AI and Analytics samples are validated on the AI Tools Offline Installer. For the full list of validated platforms, refer to [Platform Validation](https://github.com/oneapi-src/oneAPI-samples/tree/master?tab=readme-ov-file#platform-validation).
+
## Key Implementation Details
-This code sample is implemented for CPU using Python programming language. The sample requires NumPy, Pandas, Modin* libraries, and the time module in Python.
+This code sample is implemented for CPU using the Python programming language. The sample requires the NumPy, Pandas, and Modin libraries, as well as Python's built-in time module.
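+
+As a minimal sketch of the comparison this sample performs (illustrative code, not the sample itself), the same operation can be timed with both libraries:
+
+```
+import time
+import numpy as np
+import pandas
+import modin.pandas as pd
+
+data = np.random.randint(0, 100, size=(1_000_000, 4))
+
+start = time.time()
+pandas.DataFrame(data).mean()
+print(f"Pandas mean(): {time.time() - start:.3f}s")
+
+start = time.time()
+pd.DataFrame(data).mean()  # same API, distributed by Modin
+print(f"Modin mean():  {time.time() - start:.3f}s")
+```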
## Environment Setup
-If you want to run the sample on a local system using a command-line interface (CLI), you must install the Modin in a new Conda* environment first.
+You will need to download and install the following toolkits, tools, and components to use the sample.
+
-### Install Modin*
+**1. Get AI Tools**
-1. Create a Conda environment.
- ```
- conda create --name modin
- ```
-2. Activate the Conda environment.
- ```
- source activate modin
- ```
-3. Remove existing versions of Modin* (if any exist).
- ```
- conda remove modin --y
- ```
-4. Install Modin (v0.12.1 or newer).
- ```
- pip install modin[all]==0.12.1
- ```
-5. Install the NumPy and Pandas libraries.
- ```
- pip install numpy
- pip install pandas
- ```
-6. Install ipython to run the notebook on your system.
- ```
- pip install ipython
- ```
-### Run the Sample
+Required AI Tools: Modin
-1. Change to the directory containing the `Modin_Vs_Pandas.ipynb` notebook file on your local system.
+If you have not already done so, select and install these tools via the [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html). AI and Analytics samples are validated on the AI Tools Offline Installer, so it is recommended to select the Offline Installer option in the AI Tools Selector.
-2. Run the sample notebook.
- ```
- ipython Modin_Vs_Pandas.ipynb
- ```
+>**Note**: If the Docker option is chosen in the AI Tools Selector, refer to [Working with Preset Containers](https://github.com/intel/ai-containers/tree/main/preset) to learn how to run the Docker container and the samples.
-## Run the `Modin* Vs Pandas Performance` Sample in Google Colaboratory
+**2. (Offline Installer) Activate the AI Tools bundle base environment**
+
+If the default path is used during the installation of AI Tools:
+```
+source $HOME/intel/oneapi/intelpython/bin/activate
+```
+If a non-default path is used:
+```
+source <custom_path>/bin/activate
+```
-1. Change to the directory containing the `Modin_Vs_Pandas.ipynb` notebook file on your local system.
+
+**3. (Offline Installer) Activate relevant Conda environment**
+
+```
+conda activate modin
+```
+
+**4. Clone the GitHub repository**
+
+
+```
+git clone https://github.com/oneapi-src/oneAPI-samples.git
+cd oneAPI-samples/AI-and-Analytics/Getting-Started-Samples/Modin_Vs_Pandas
+```
-2. Open the notebook file, and remove the prepended number sign (#) symbol from the following lines:
- ```
- #!pip install modin[all]==0.12.1
- #!pip install numpy
- #!pip install pandas
- ```
- These changes will install the Modin and the NumPy and Pandas libraries when run in the Colab notebook.
+**5. Install dependencies**
+
+>**Note**: Before running the following commands, make sure your Conda/Python environment with AI Tools installed is activated.
-3. Save your changes.
+```
+pip install -r requirements.txt
+pip install notebook
+```
+For Jupyter Notebook, refer to [Installing Jupyter](https://jupyter.org/install) for detailed installation instructions.
-4. Open [Google Colaboratory](https://colab.research.google.com/?utm_source=scs-index).
+## Run the Sample
+>**Note**: Before running the sample, make sure [Environment Setup](#environment-setup) is completed.
-5. Sign in to Colab using your Google account.
+Go to the section that corresponds to the installation method you chose in the [AI Tools Selector](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-tools-selector.html) to see the relevant instructions:
+* [AI Tools Offline Installer (Validated)](#ai-tools-offline-installer-validated)
+* [Conda/PIP](#condapip)
+* [Docker](#docker)
+
+### AI Tools Offline Installer (Validated)
-6. Select **File** > **Upload notebook**.
+**1. Register Conda kernel to Jupyter Notebook kernel**
-7. Upload the modified notebook file.
+If the default path is used during the installation of AI Tools:
+```
+$HOME/intel/oneapi/intelpython/envs/modin/bin/python -m ipykernel install --user --name=modin
+```
+If a non-default path is used:
+```
+<custom_path>/bin/python -m ipykernel install --user --name=modin
+```
-8. Change to the notebook, and click **Open**.
+**2. Launch Jupyter Notebook**
+
+```
+jupyter notebook --ip=0.0.0.0
+```
+**3. Follow the instructions to open the URL with the token in your browser**
-9. Select **Runtime** > **Run all**.
+**4. Select the Notebook**
+
+```
+Modin_Vs_Pandas.ipynb
+```
+**5. Change the kernel to `modin`**
+
+**6. Run every cell in the Notebook in sequence**
+
+
+### Conda/PIP
+> **Note**: Before running the instructions below, make sure your Conda/Python environment with AI Tools installed is activated.
+
+**1. Register Conda/Python kernel to Jupyter Notebook kernel**
+
+For Conda:
+```
+<CONDA_PATH_TO_ENV>/bin/python -m ipykernel install --user --name=<your-env-name>
+```
+To know `<CONDA_PATH_TO_ENV>`, run `conda env list` and find your Conda environment path.
+
+For PIP:
+```
+python -m ipykernel install --user --name=<your-env-name>
+```
+**2. Launch Jupyter Notebook**
+
+```
+jupyter notebook --ip=0.0.0.0
+```
+**3. Follow the instructions to open the URL with the token in your browser**
+
+**4. Select the Notebook**
+```
+Modin_Vs_Pandas.ipynb
+```
+**5. Change the kernel to `<your-env-name>`**
+
+
+**6. Run every cell in the Notebook in sequence**
+
+### Docker
+AI Tools Docker images already have Get Started samples pre-installed. Refer to [Working with Preset Containers](https://github.com/intel/ai-containers/tree/main/preset) to learn how to run the Docker container and the samples.
+
+
## Example Output
@@ -109,8 +170,11 @@ Example expected cell output is included in `Modin_Vs_Pandas.ipynb`.
## License
Code samples are licensed under the MIT license. See
-[License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt) for details.
+[License.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/License.txt)
+for details.
-Third party program licenses are at [third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt).
+Third party program Licenses can be found here:
+[third-party-programs.txt](https://github.com/oneapi-src/oneAPI-samples/blob/master/third-party-programs.txt)
*Other names and brands may be claimed as the property of others. [Trademarks](https://www.intel.com/content/www/us/en/legal/trademarks.html)
+