Add StrongSORT, DeepOCSORT, OCSORT from boxmot library. #10

Open · wants to merge 2 commits into base: main
10 changes: 9 additions & 1 deletion pyproject.toml
@@ -34,7 +34,7 @@ packages = [
]

[tool.poetry.dependencies]
python = "^3.8"
python = "^3.9"
packaging = {version = "23.1"}
aenum = {version = "3.1.15"}
tqdm = {version = "^4.65.0"}
@@ -80,6 +80,14 @@ mediapy = {version = "*"}
einshape = {version = "*"}
ipympl = {version = "*"}

# boxmot
filterpy = {version = "*"}
scikit-learn = {version = "*"}
scipy = "^1.12.0"
loguru = "^0.7.2"
gdown = "^5.0.1"
pyyaml = "^6.0.1"

# [[tool.poetry.source]]
# name = "torch+cu118"
# url = "https://download.pytorch.org/whl/cu118"
24 changes: 12 additions & 12 deletions src/juxtapose/rtm.py
@@ -13,7 +13,7 @@
from juxtapose.data import load_inference_source
from juxtapose.detectors import get_detector
from juxtapose.pose.rtmpose import RTMPose
from juxtapose.trackers import Tracker, TRACKER_MAP
from juxtapose.trackers import Tracker, TRACKER_MAP, BOXMOT_TRACKER_MAP
from juxtapose.types import (
DETECTOR_TYPES,
POSE_ESTIMATOR_TYPES,
@@ -74,10 +74,13 @@ def __init__(
self.rtmpose = RTMPose(pose.split("-")[1], device=device)
self.annotator = annotator
self.box_annotator = sv.BoxAnnotator()

self.device = device
self.tracker_type = tracker

if tracker not in TRACKER_MAP.keys():
if (
tracker not in TRACKER_MAP.keys()
and tracker not in BOXMOT_TRACKER_MAP.keys()
):
self.tracker_type = None

self.dataset = None
@@ -88,12 +91,13 @@ def setup_source(self, source, imgsz=640, vid_stride=1) -> None:
source=source, imgsz=imgsz, vid_stride=vid_stride
)
self.source_type = self.dataset.source_type
self.vid_path, self.vid_writer = [None] * self.dataset.bs, [
None
] * self.dataset.bs
self.vid_path, self.vid_writer = (
[None] * self.dataset.bs,
[None] * self.dataset.bs,
)

def setup_tracker(self) -> None:
self.tracker = Tracker(self.tracker_type).tracker
self.tracker = Tracker(self.tracker_type, self.device)

def setup_detector(self, det, device, captions):
return get_detector(det, device=device, captions=captions)
@@ -235,11 +239,7 @@ def stream_inference(
# multi object tracking (adjust bounding boxes)
with profilers[1]:
if self.tracker_type:
detections: Detections = self.tracker.update(
bboxes=detections.xyxy,
confidence=detections.confidence,
labels=detections.labels,
)
detections: Detections = self.tracker.update(detections, im)
track_id = detections.track_id
else:
track_id = np.array([""] * len(detections.xyxy))
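Reviewer note: the call site now hands the tracker the full Detections object plus the frame, since boxmot's appearance-based trackers need image crops for ReID features, and the wrapper is constructed with the device so those models can be placed on GPU. A hedged sketch of the adapter this implies; the class name, the `backend` attribute and the single-class assumption are illustrative, not the actual juxtapose implementation, and the output column order follows boxmot's documented `update(dets, img)` contract:

```python
import numpy as np


class BoxmotAdapter:
    """Hedged sketch of the unified tracker interface assumed by this call site."""

    def __init__(self, backend, device: str = "cpu"):
        self.backend = backend  # e.g. a StrongSORT / DeepOCSORT instance
        self.device = device

    def update(self, detections, im: np.ndarray):
        n = len(detections.xyxy)
        dets = np.hstack(
            [
                detections.xyxy,                       # (N, 4) boxes, xyxy
                detections.confidence.reshape(-1, 1),  # (N, 1) scores
                np.zeros((n, 1)),                      # (N, 1) class column (single class assumed)
            ]
        )
        tracks = self.backend.update(dets, im)  # rows start with x1, y1, x2, y2, id
        detections.track_id = (
            tracks[:, 4].astype(int) if len(tracks) else np.empty(0, dtype=int)
        )
        return detections
```

End to end, the same entry point should then accept the new tracker names, e.g. `RTM(det=..., pose=..., tracker="strongsort")`; the exact key strings come from BOXMOT_TRACKER_MAP, which is not shown in this diff.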
13 changes: 11 additions & 2 deletions src/juxtapose/trackers/__init__.py
@@ -1,8 +1,17 @@
from .bot_sort import BOTSORT
from .byte_tracker import BYTETracker
from .track import Tracker, TRACKER_MAP
from .track import Tracker, TRACKER_MAP, BOXMOT_TRACKER_MAP
from .boxmot import StrongSORT, DeepOCSORT


# from .track import register_tracker

__all__ = "Tracker", "TRACKER_MAP", "BOTSORT", "BYTETracker" # allow simpler import
__all__ = (
"Tracker",
"TRACKER_MAP",
"BOXMOT_TRACKER_MAP",
"BOTSORT",
"BYTETracker",
"StrongSORT",
"DeepOCSORT",
) # allow simpler import
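With the widened `__all__`, the boxmot-backed trackers are importable from the same package as the existing ones. A quick hedged check (the key strings inside BOXMOT_TRACKER_MAP are defined in track.py and not shown in this diff):

```python
from juxtapose.trackers import (
    BOXMOT_TRACKER_MAP,
    TRACKER_MAP,
    BYTETracker,
    DeepOCSORT,
    StrongSORT,
)

# Keys of both maps are the strings accepted as RTM's `tracker` argument.
print(sorted(TRACKER_MAP), sorted(BOXMOT_TRACKER_MAP))
```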
37 changes: 37 additions & 0 deletions src/juxtapose/trackers/boxmot/__init__.py
@@ -0,0 +1,37 @@
# Mikel Broström 🔥 Yolo Tracking 🧾 AGPL-3.0 license

__version__ = "10.0.41"

from juxtapose.trackers.boxmot.postprocessing.gsi import gsi
from juxtapose.trackers.boxmot.tracker_zoo import create_tracker, get_tracker_config

# from juxtapose.trackers.boxmot.trackers.botsort.bot_sort import BoTSORT
# from juxtapose.trackers.boxmot.trackers.bytetrack.byte_tracker import BYTETracker
from juxtapose.trackers.boxmot.trackers.deepocsort.deep_ocsort import (
DeepOCSort as DeepOCSORT,
)
from juxtapose.trackers.boxmot.trackers.hybridsort.hybridsort import HybridSORT
from juxtapose.trackers.boxmot.trackers.ocsort.ocsort import OCSort as OCSORT
from juxtapose.trackers.boxmot.trackers.strongsort.strong_sort import StrongSORT

TRACKERS = [
# "bytetrack",
# "botsort",
"strongsort",
"ocsort",
"deepocsort",
"hybridsort",
]

__all__ = (
"__version__",
"StrongSORT",
"OCSORT",
# "BYTETracker",
# "BoTSORT",
"DeepOCSORT",
"HybridSORT",
"create_tracker",
"get_tracker_config",
"gsi",
)
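A short sanity check against the vendored package surface added here (requires the new boxmot dependencies from pyproject.toml; `create_tracker` and `get_tracker_config` come from tracker_zoo.py, which is not shown in this diff):

```python
from juxtapose.trackers.boxmot import (
    TRACKERS,
    DeepOCSORT,
    HybridSORT,
    OCSORT,
    StrongSORT,
    create_tracker,
    get_tracker_config,
)

print(TRACKERS)  # ['strongsort', 'ocsort', 'deepocsort', 'hybridsort']
```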
16 changes: 16 additions & 0 deletions src/juxtapose/trackers/boxmot/appearance/__init__.py
@@ -0,0 +1,16 @@
# Mikel Broström 🔥 Yolo Tracking 🧾 AGPL-3.0 license

import pandas as pd


def export_formats():
# yolo tracking export formats
x = [
["PyTorch", "-", ".pt", True, True],
["TorchScript", "torchscript", ".torchscript", True, True],
["ONNX", "onnx", ".onnx", True, True],
["OpenVINO", "openvino", "_openvino_model", True, False],
["TensorRT", "engine", ".engine", False, True],
["TensorFlow Lite", "tflite", ".tflite", True, False],
]
return pd.DataFrame(x, columns=["Format", "Argument", "Suffix", "CPU", "GPU"])
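Usage sketch for the export table above, e.g. listing the ReID export formats that can run on GPU:

```python
from juxtapose.trackers.boxmot.appearance import export_formats

fmts = export_formats()
print(fmts[fmts["GPU"]][["Format", "Suffix"]])
# -> PyTorch (.pt), TorchScript (.torchscript), ONNX (.onnx), TensorRT (.engine)
```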
108 changes: 108 additions & 0 deletions src/juxtapose/trackers/boxmot/appearance/backbones/__init__.py
@@ -0,0 +1,108 @@
# Mikel Broström 🔥 Yolo Tracking 🧾 AGPL-3.0 license

from __future__ import absolute_import

# from juxtapose.trackers.boxmot.appearance.backbones.clip.make_model import make_model
# from juxtapose.trackers.boxmot.appearance.backbones.hacnn import HACNN
# from juxtapose.trackers.boxmot.appearance.backbones.lmbn.lmbn_n import LMBN_n
# from juxtapose.trackers.boxmot.appearance.backbones.mlfn import mlfn
# from juxtapose.trackers.boxmot.appearance.backbones.mobilenetv2 import (
# mobilenetv2_x1_0,
# mobilenetv2_x1_4,
# )
from juxtapose.trackers.boxmot.appearance.backbones.osnet import (
osnet_ibn_x1_0,
osnet_x0_5,
osnet_x0_25,
osnet_x0_75,
osnet_x1_0,
)

# from juxtapose.trackers.boxmot.appearance.backbones.osnet_ain import (
# osnet_ain_x0_5,
# osnet_ain_x0_25,
# osnet_ain_x0_75,
# osnet_ain_x1_0,
# )
# from juxtapose.trackers.boxmot.appearance.backbones.resnet import resnet50, resnet101

NR_CLASSES_DICT = {"market1501": 751, "duke": 702, "veri": 576, "vehicleid": 576}


__model_factory = {
# image classification models
# "resnet50": resnet50,
# "resnet101": resnet101,
# "mobilenetv2_x1_0": mobilenetv2_x1_0,
# "mobilenetv2_x1_4": mobilenetv2_x1_4,
# reid-specific models
# "hacnn": HACNN,
# "mlfn": mlfn,
"osnet_x1_0": osnet_x1_0,
"osnet_x0_75": osnet_x0_75,
"osnet_x0_5": osnet_x0_5,
"osnet_x0_25": osnet_x0_25,
# "osnet_ibn_x1_0": osnet_ibn_x1_0,
# "osnet_ain_x1_0": osnet_ain_x1_0,
# "osnet_ain_x0_75": osnet_ain_x0_75,
# "osnet_ain_x0_5": osnet_ain_x0_5,
# "osnet_ain_x0_25": osnet_ain_x0_25,
# "lmbn_n": LMBN_n,
# "clip": make_model,
}


def show_avai_models():
"""Displays available models.

Examples::
>>> from torchreid import models
>>> models.show_avai_models()
"""
print(list(__model_factory.keys()))


def get_nr_classes(weigths):
num_classes = [
value for key, value in NR_CLASSES_DICT.items() if key in str(weigths.name)
]
if len(num_classes) == 0:
num_classes = 1
else:
num_classes = num_classes[0]
return num_classes


def build_model(name, num_classes, loss="softmax", pretrained=True, use_gpu=True):
"""A function wrapper for building a model.

Args:
name (str): model name.
num_classes (int): number of training identities.
loss (str, optional): loss function to optimize the model. Currently
supports "softmax" and "triplet". Default is "softmax".
pretrained (bool, optional): whether to load ImageNet-pretrained weights.
Default is True.
use_gpu (bool, optional): whether to use gpu. Default is True.

Returns:
nn.Module

Examples::
>>> from torchreid import models
>>> model = models.build_model('resnet50', 751, loss='softmax')
"""
avai_models = list(__model_factory.keys())
if name not in avai_models:
raise KeyError("Unknown model: {}. Must be one of {}".format(name, avai_models))
# if "clip" in name:
# from juxtapose.trackers.boxmot.appearance.backbones.clip.config.defaults import (
# _C as cfg,
# )

# return __model_factory[name](
# cfg, num_class=num_classes, camera_num=2, view_num=1
# )
return __model_factory[name](
num_classes=num_classes, loss=loss, pretrained=pretrained, use_gpu=use_gpu
)
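Usage sketch for the backbone factory above; the checkpoint filename is hypothetical and only needs to contain a dataset key so `get_nr_classes` can infer the identity count:

```python
from pathlib import Path

from juxtapose.trackers.boxmot.appearance.backbones import (
    build_model,
    get_nr_classes,
    show_avai_models,
)

show_avai_models()  # ['osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25']

weights = Path("osnet_x0_25_market1501.pt")  # hypothetical ReID checkpoint name
model = build_model(
    "osnet_x0_25",
    num_classes=get_nr_classes(weights),     # 751, matched via "market1501"
    pretrained=False,                        # skip loading ImageNet-pretrained weights
    use_gpu=False,
)
```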