From 0a6f2bef0d7c1fdff6e3e438ee18d77fc27b2262 Mon Sep 17 00:00:00 2001
From: Felipe Armoni
Date: Sun, 21 Jul 2024 14:38:09 -0300
Subject: [PATCH 1/3] Updated quickstart

---
 docs/quickstart/run_a_model.md | 31 +++++++++++++++++++------------
 1 file changed, 19 insertions(+), 12 deletions(-)

diff --git a/docs/quickstart/run_a_model.md b/docs/quickstart/run_a_model.md
index b450390acf..4465492c81 100644
--- a/docs/quickstart/run_a_model.md
+++ b/docs/quickstart/run_a_model.md
@@ -41,13 +41,24 @@ In the code above, we loaded a model and then we used that model's `infer(...)`
 Running inference is fun but it's not much to look at. Let's add some code to visualize our results.
 
 ```python
-from inference import get_model
+from io import BytesIO
+
+import requests
 import supervision as sv
-import cv2
+from inference import get_model
+from PIL import Image
+from PIL.ImageFile import ImageFile
 
-# define the image url to use for inference
-image_file = "people-walking.jpg"
-image = cv2.imread(image_file)
+
+def load_image_from_url(url: str) -> ImageFile:
+    response = requests.get(url)
+    response.raise_for_status()  # check if the request was successful
+    image = Image.open(BytesIO(response.content))
+    return image
+
+
+# load the image from a URL
+image = load_image_from_url("https://media.roboflow.com/inference/people-walking.jpg")
 
 # load a pre-trained yolov8n model
 model = get_model(model_id="yolov8n-640")
@@ -59,21 +70,17 @@ results = model.infer(image)[0]
 detections = sv.Detections.from_inference(results)
 
 # create supervision annotators
-bounding_box_annotator = sv.BoundingBoxAnnotator()
+bounding_box_annotator = sv.BoxAnnotator()
 label_annotator = sv.LabelAnnotator()
 
 # annotate the image with our inference results
-annotated_image = bounding_box_annotator.annotate(
-    scene=image, detections=detections)
-annotated_image = label_annotator.annotate(
-    scene=annotated_image, detections=detections)
+annotated_image = bounding_box_annotator.annotate(scene=image, detections=detections)
+annotated_image = label_annotator.annotate(scene=annotated_image, detections=detections)
 
 # display the image
 sv.plot_image(annotated_image)
 ```
 
-The `people-walking.jpg` file is hosted here.
-
 ![People Walking Annotated](https://storage.googleapis.com/com-roboflow-marketing/inference/people-walking-annotated.jpg)
 
 ## Summary

From 28f9a764004dc5894b91274d36802e00a9695e14 Mon Sep 17 00:00:00 2001
From: Felipe Armoni
Date: Sun, 21 Jul 2024 15:41:20 -0300
Subject: [PATCH 2/3] Add model quickstart to the side bar

---
 mkdocs.yml | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/mkdocs.yml b/mkdocs.yml
index a2bf2ca872..0999d68b4e 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -33,10 +33,10 @@ nav:
       - Enterprise Features: enterprise/enterprise.md
   - Inference Basics:
       - Roboflow Ecosystem: quickstart/roboflow_ecosystem.md
-      - "Models: Popular": quickstart/aliases.md
-      - "Models: Fine-tuned": quickstart/explore_models.md
-      - "Models: Universe": quickstart/load_from_universe.md
-      - "Models: Local Weights": models/from_local_weights.md
+      - 'Models: Popular': quickstart/aliases.md
+      - 'Models: Fine-tuned': quickstart/explore_models.md
+      - 'Models: Universe': quickstart/load_from_universe.md
+      - 'Models: Local Weights': models/from_local_weights.md
       - Supported Fine-Tuned Models:
           - YOLOv10: fine-tuned/yolov10.md
           - YOLOv9: fine-tuned/yolov9.md
@@ -55,6 +55,7 @@ nav:
           - Segment Anything (Segmentation): foundation/sam.md
           - YOLO-World (Object Detection): foundation/yolo_world.md
   - Run a Model:
+      - Getting started: quickstart/run_a_model.md
       - Predict on an Image Over HTTP: quickstart/run_model_on_image.md
       - Predict on a Video, Webcam or RTSP Stream: quickstart/run_model_on_rtsp_webcam.md
       - Predict Over UDP: quickstart/run_model_over_udp.md
@@ -102,7 +103,7 @@ nav:
   - Cookbooks: cookbooks.md
 
 theme:
-  name: "material"
+  name: 'material'
   logo: inference-icon.png
   favicon: inference-icon.png
   custom_dir: docs/theme
@@ -122,13 +123,13 @@ theme:
 
   palette:
     - scheme: default
-      primary: "custom"
+      primary: 'custom'
       toggle:
        icon: material/brightness-7
        name: Switch to dark mode
 
     - scheme: slate
-      primary: "custom"
+      primary: 'custom'
      toggle:
        icon: material/brightness-4
        name: Switch to light mode
@@ -164,6 +165,6 @@ markdown_extensions:
     permalink: true
 
 extra_javascript:
-  - "https://widget.kapa.ai/kapa-widget.bundle.js"
-  - "javascript/init_kapa_widget.js"
-  - "javascript/cookbooks.js"
+  - 'https://widget.kapa.ai/kapa-widget.bundle.js'
+  - 'javascript/init_kapa_widget.js'
+  - 'javascript/cookbooks.js'

From 498536fa8366575d659288fc52d0a75e3c788814 Mon Sep 17 00:00:00 2001
From: Felipe Armoni
Date: Sun, 21 Jul 2024 16:01:16 -0300
Subject: [PATCH 3/3] Explicit GPU warn on Cogvlm

---
 docs/foundation/cogvlm.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/docs/foundation/cogvlm.md b/docs/foundation/cogvlm.md
index da149f8c21..74c1491dfe 100644
--- a/docs/foundation/cogvlm.md
+++ b/docs/foundation/cogvlm.md
@@ -26,9 +26,13 @@
 We recommend using CogVLM paired with inference HTTP API adjusted to run in GPU environment. It's easy to set up
 with our `inference-cli` tool. Run the following command to set up environment and run the API under `http://localhost:9001`
 
+!!! warning
+    Make sure that you are running this on a machine with an NVIDIA GPU! Otherwise, CogVLM will not be available.
+
+
 ```bash
 pip install inference inference-cli inference-sdk
-inference server start # make sure that you are running this at machine with GPU! Otherwise CogVLM will not be available
+inference server start
 ```
 
 Let's ask a question about the following image:
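Note for reviewers: taken together, the hunks in patch 1 leave the quickstart snippet roughly as sketched below. This is an assembled, runnable version for trying the change without applying the series; it assumes `inference`, `requests`, and `Pillow` are installed, plus a `supervision` release recent enough to ship `sv.BoxAnnotator` (the rename of the deprecated `sv.BoundingBoxAnnotator`, which is what the patch switches to). The comments covering lines that fall between the two hunks are paraphrased, not quoted from the file.

```python
# Assembled post-patch quickstart (sketch): fetch an image over HTTP,
# run a local YOLOv8n model, and draw the detections with supervision.
from io import BytesIO

import requests
import supervision as sv
from inference import get_model
from PIL import Image
from PIL.ImageFile import ImageFile


def load_image_from_url(url: str) -> ImageFile:
    response = requests.get(url)
    response.raise_for_status()  # fail loudly on a bad download
    return Image.open(BytesIO(response.content))


image = load_image_from_url("https://media.roboflow.com/inference/people-walking.jpg")

# load a pre-trained yolov8n model and run inference on the PIL image
model = get_model(model_id="yolov8n-640")
results = model.infer(image)[0]

# convert the raw response into a supervision Detections object
detections = sv.Detections.from_inference(results)

# draw boxes and class labels, then display the annotated image
annotated_image = sv.BoxAnnotator().annotate(scene=image, detections=detections)
annotated_image = sv.LabelAnnotator().annotate(scene=annotated_image, detections=detections)
sv.plot_image(annotated_image)
```

Swapping `cv2.imread` of a local file for a `requests`/`PIL` download is also why the patch drops the "The `people-walking.jpg` file is hosted here." sentence: the image URL is now embedded directly in the snippet, so the docs no longer depend on a file sitting next to them.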