Remove embedding overlay and facenet references
jsalas98 committed Mar 30, 2021
1 parent 6233f97 commit 50c6ba5
Showing 9 changed files with 2 additions and 89 deletions.
1 change: 0 additions & 1 deletion .gitignore
@@ -38,4 +38,3 @@ cscope.out
# Examples
tests/examples/classification/classification
tests/examples/detection/detection
tests/examples/embedding/embedding
6 changes: 2 additions & 4 deletions README.md
@@ -10,7 +10,7 @@ GstInference is an open-source project from Ridgerun Engineering that provides a

This repo uses **[R²Inference](https://github.com/RidgeRun/r2inference)**, an abstraction layer in C/C++ for a variety of machine learning frameworks. With R²Inference, a single C/C++ application may work with models on different frameworks. This is useful for executing inference while taking advantage of different hardware resources such as CPUs, GPUs, or AI-optimized accelerators.

GstInference provides several example elements for common applications, such as [`Inception v4`](ext/r2inference/gstinceptionv4.c) for image classification, [`TinyYOLO v2`](ext/r2inference/gsttinyyolov2.c) for object detection, and [`FaceNet`](ext/r2inference/gstfacenetv1.c) for face recognition. Examples are provided for performing inference on any GStreamer video stream.
GstInference provides several example elements for common applications, such as [`Inception v4`](ext/r2inference/gstinceptionv4.c) for image classification and [`TinyYOLO v2`](ext/r2inference/gsttinyyolov2.c) for object detection. Examples are provided for performing inference on any GStreamer video stream.

<img src="https://developer.ridgerun.com/wiki/images/thumb/4/4f/GstInference-examples.jpeg/800px-GstInference-examples.jpeg" width="800">

@@ -27,8 +27,6 @@ Follow the steps to get GstInference running on your platform:

We provide GStreamer [example pipelines](https://developer.ridgerun.com/wiki/index.php?title=GstInference/Example_pipelines) for all our supported platforms, architectures, and backends.

We also provide [example applications](https://developer.ridgerun.com/wiki/index.php?title=GstInference/Example_Applications) for classification, detection and face recognition.

Our [smart lock](tests/examples/face_detection/README.md) example can get you started with a real security camera application.
We also provide [example applications](https://developer.ridgerun.com/wiki/index.php?title=GstInference/Example_Applications) for classification and detection.

We also provide example trained models in our [model zoo](https://developer.ridgerun.com/wiki/index.php?title=GstInference/Model_Zoo).
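
As a reading aid, the example pipelines the README points to can also be driven from a short C application. The following is a minimal sketch, not part of this commit: it assumes the inceptionv4 element registered by this plugin exposes a model-location property, as in the project's documented example pipelines (some builds may also need a backend property). Verify the exact property names with gst-inspect-1.0 inceptionv4 and substitute a real trained model from the model zoo for the placeholder graph.pb.

/* Minimal GstInference classification app (illustrative sketch only).
 * Assumptions: the "inceptionv4" element is available and exposes a
 * "model-location" property, mirroring the wiki's example pipelines;
 * confirm with: gst-inspect-1.0 inceptionv4 */
#include <gst/gst.h>

int
main (int argc, char *argv[])
{
  GstElement *pipeline;
  GError *error = NULL;

  gst_init (&argc, &argv);

  /* graph.pb is a placeholder; use a trained model from the model zoo. */
  pipeline = gst_parse_launch (
      "videotestsrc num-buffers=300 ! videoconvert ! "
      "inceptionv4 name=net model-location=graph.pb ! fakesink",
      &error);
  if (pipeline == NULL) {
    g_printerr ("Failed to build pipeline: %s\n", error->message);
    g_clear_error (&error);
    return 1;
  }

  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  g_usleep (5 * G_USEC_PER_SEC);        /* let a few buffers flow */
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);

  return 0;
}
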
7 changes: 0 additions & 7 deletions ext/opencv/gstplugin.cc
@@ -23,20 +23,13 @@
#include "config.h"
#endif

#include "gstembeddingoverlay.h"
#include "gstinferenceoverlay.h"

static gboolean
plugin_init (GstPlugin * plugin)
{
gboolean ret = TRUE;

ret =
gst_element_register (plugin, "embeddingoverlay", GST_RANK_NONE,
GST_TYPE_EMBEDDING_OVERLAY);
if (!ret) {
goto out;
}
ret =
gst_element_register (plugin, "inferenceoverlay", GST_RANK_NONE,
GST_TYPE_INFERENCE_OVERLAY);
1 change: 0 additions & 1 deletion ext/opencv/meson.build
@@ -1,7 +1,6 @@
gstinference_sources = [
'gstplugin.cc',
'gstinferenceoverlay.cc',
'gstembeddingoverlay.cc'
]

if opencv_dep.found()
7 changes: 0 additions & 7 deletions ext/r2inference/gstinference.c
@@ -29,7 +29,6 @@
#include "gstinceptionv4.h"
#include "gsttinyyolov2.h"
#include "gsttinyyolov3.h"
#include "gstfacenetv1.h"
#include "gstresnet50v1.h"
#include "gstmobilenetv2.h"
#include "gstmobilenetv2ssd.h"
@@ -90,12 +89,6 @@ plugin_init (GstPlugin * plugin)
goto out;
}

ret = gst_element_register (plugin, "facenetv1", GST_RANK_NONE,
GST_TYPE_FACENETV1);
if (!ret) {
goto out;
}

ret = gst_element_register (plugin, "mobilenetv2ssd", GST_RANK_NONE,
GST_TYPE_MOBILENETV2SSD);
if (!ret) {
1 change: 0 additions & 1 deletion ext/r2inference/meson.build
@@ -1,5 +1,4 @@
gstinference_sources = [
'gstfacenetv1.c',
'gstinceptionv1.c',
'gstinceptionv2.c',
'gstinceptionv3.c',
57 changes: 0 additions & 57 deletions gst-libs/gst/r2inference/gstinferencepreprocess.c
@@ -132,63 +132,6 @@ gst_normalize (GstVideoFrame * inframe, GstVideoFrame * outframe, gdouble mean,
return TRUE;
}

gboolean
gst_normalize_face (GstVideoFrame * inframe, GstVideoFrame * outframe,
gint model_channels)
{
gint i, j, pixel_stride, width, height, channels;
gdouble mean, std, variance, sum, normalized, R, G, B;
gint first_index = 0, last_index = 0, offset = 0;

channels = GST_VIDEO_FRAME_N_COMPONENTS (inframe);
pixel_stride = GST_VIDEO_FRAME_COMP_STRIDE (inframe, 0) / channels;
width = GST_VIDEO_FRAME_WIDTH (inframe);
height = GST_VIDEO_FRAME_HEIGHT (inframe);

sum = 0;
normalized = 0;

g_return_val_if_fail (inframe != NULL, FALSE);
g_return_val_if_fail (outframe != NULL, FALSE);

for (i = 0; i < height; ++i) {
for (j = 0; j < width; ++j) {
sum =
sum + (((guchar *) inframe->data[0])[(i * pixel_stride +
j) * channels + 0]);
sum =
sum + (((guchar *) inframe->data[0])[(i * pixel_stride +
j) * channels + 1]);
sum =
sum + (((guchar *) inframe->data[0])[(i * pixel_stride +
j) * channels + 2]);
}
}

mean = sum / (float) (width * height * channels);

for (i = 0; i < height; ++i) {
for (j = 0; j < width; ++j) {
R = (gfloat) ((((guchar *) inframe->data[0])[(i * pixel_stride +
j) * channels + 0])) - mean;
G = (gfloat) ((((guchar *) inframe->data[0])[(i * pixel_stride +
j) * channels + 1])) - mean;
B = (gfloat) ((((guchar *) inframe->data[0])[(i * pixel_stride +
j) * channels + 2])) - mean;
normalized = normalized + pow (R, 2);
normalized = normalized + pow (G, 2);
normalized = normalized + pow (B, 2);
}
}

variance = normalized / (float) (width * height * channels);
std = 1 / sqrt (variance);

gst_apply_means_std (inframe, outframe, first_index, last_index, offset,
channels, mean, mean, mean, std, std, std, model_channels);
return TRUE;
}

gboolean
gst_subtract_mean (GstVideoFrame * inframe, GstVideoFrame * outframe,
gdouble mean_red, gdouble mean_green, gdouble mean_blue,
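
For reference, the removed gst_normalize_face() performed facenet-style prewhitening: it computed a single mean and variance over every channel of every pixel in the frame, then shifted and scaled each value by them. A standalone sketch of that math follows; it assumes a tightly packed interleaved buffer and a multiplicative 1/sqrt(variance) scale (how gst_apply_means_std applies its std arguments is not shown in this diff).

/* Standalone sketch of the whitening computed by the deleted
 * gst_normalize_face(): out = (in - mean) / sqrt(variance), with mean and
 * variance taken over every channel of every pixel in the frame.
 * Assumes an interleaved buffer with no row padding, unlike the original,
 * which walked GstVideoFrame strides. */
#include <math.h>
#include <stddef.h>

static void
prewhiten_frame (const unsigned char *in, float *out, size_t width,
    size_t height, size_t channels)
{
  size_t i;
  size_t n = width * height * channels;
  double sum = 0.0, sq = 0.0, mean, variance, scale;

  for (i = 0; i < n; i++)
    sum += in[i];
  mean = sum / (double) n;

  for (i = 0; i < n; i++)
    sq += ((double) in[i] - mean) * ((double) in[i] - mean);
  variance = sq / (double) n;

  /* Guard against a flat frame; the original did not. */
  scale = variance > 0.0 ? 1.0 / sqrt (variance) : 1.0;

  for (i = 0; i < n; i++)
    out[i] = (float) (((double) in[i] - mean) * scale);
}
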
10 changes: 0 additions & 10 deletions gst-libs/gst/r2inference/gstinferencepreprocess.h
Expand Up @@ -38,16 +38,6 @@ G_BEGIN_DECLS

gboolean gst_normalize(GstVideoFrame * inframe, GstVideoFrame * outframe, gdouble mean, gdouble std, gint model_channels);

/**
* \brief Especial normalization used for facenet
*
* \param inframe The input frame
* \param outframe The output frame after preprocess
* \param model_channels The number of channels of the model
*/

gboolean gst_normalize_face(GstVideoFrame * inframe, GstVideoFrame * outframe, gint model_channels);

/**
* \brief Substract the mean value to every pixel
*
1 change: 0 additions & 1 deletion tests/examples/meson.build
@@ -1,3 +1,2 @@
subdir('classification')
subdir('detection')
subdir('embedding')
