feat: add versioning + pre-commit
TomDarmon committed Nov 13, 2023
1 parent c38868f commit bc75900
Showing 8 changed files with 120 additions and 30 deletions.
14 changes: 14 additions & 0 deletions .github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,14 @@
## Description of the goal of the PR


## Changes this PR introduces (fill it before implementation)

- [ ]

## Checklist before requesting a review
- [ ] The CI pipeline passes
- [ ] I have typed my code
- [ ] I have created / updated the docstrings
- [ ] I have updated the README, if relevant
- [ ] I have updated the requirements, if relevant
- [ ] I have tested my code
37 changes: 37 additions & 0 deletions .github/workflows/release.yaml
@@ -0,0 +1,37 @@
# This workflow triggers the CI, updates the version, and uploads the release to GitHub and Google Cloud Storage when a push is made to the 'main' branch.
#
# Workflow Steps:
#
# 1. CI is triggered using the CI workflow defined in .github/workflows/ci.yaml
# 2. If it succeeds, the version is updated using Python Semantic Release
# 3. The release is uploaded to GitHub (same step and GitHub action)

name: CI and Release on main

on:
  push:
    branches:
      - main

jobs:
  CI:
    uses: ./.github/workflows/ci.yaml

  Release:
    runs-on: ubuntu-latest
    concurrency: Release
    needs: CI
    permissions:
      id-token: write
      contents: write

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          token: ${{ secrets.GH_TOKEN }}

      - name: Python Semantic Release
        uses: python-semantic-release/python-semantic-release@master
        with:
          github_token: ${{ secrets.GH_TOKEN }}
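For context on step 2: Python Semantic Release derives the next version from conventional-commit prefixes in the history. The sketch below illustrates that convention only (feat gives a minor bump, fix/perf a patch, a breaking-change marker a major); it is not the library's actual parser, and the starting version 0.3.2 is hypothetical:

```python
# Illustrative sketch of the conventional-commits bump rule that
# semantic-release-style tools apply; not python-semantic-release's API.
def bump(version: str, message: str) -> str:
    major, minor, patch = map(int, version.split("."))
    header = message.split(":")[0]
    if "BREAKING CHANGE" in message or header.endswith("!"):
        return f"{major + 1}.0.0"              # breaking change -> major
    if header.startswith("feat"):
        return f"{major}.{minor + 1}.0"        # new feature -> minor
    if header.startswith(("fix", "perf")):
        return f"{major}.{minor}.{patch + 1}"  # bug fix -> patch
    return version                             # chore, docs, ... -> no release

assert bump("0.3.2", "feat: add versioning + pre-commit") == "0.4.0"
```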
4 changes: 2 additions & 2 deletions README.md
@@ -1,6 +1,6 @@
<div align="center">
<h2>
-    ByteTrack-Pip: Packaged version of the ByteTrack repository
+    ByteTrack-Pip: Packaged version of the ByteTrack repository
</h2>
<h4>
<img width="700" alt="teaser" src="assets/demo.gif">
@@ -19,7 +19,7 @@ This repo is a packaged version of the [ByteTrack](https://github.com/ifzhang/By
pip install bytetracker
```

-### Detection Model + ByteTrack
+### Detection Model + ByteTrack
```python
from bytetracker import BYTETracker

```
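A minimal sketch of that detector-plus-tracker loop, assuming detections arrive per frame as an `(N, 6)` float array laid out `[x1, y1, x2, y2, score, class]` (the layout `BYTETracker.update` indexes in the diff below); the detector and frame loop here are hypothetical stand-ins:

```python
import numpy as np
from bytetracker import BYTETracker

tracker = BYTETracker()  # assumes defaults mirroring bytetracker/config/byte_track.yaml

def detector(frame):
    # hypothetical stand-in for a real detection model; returns one fake
    # detection as an (N, 6) array: [x1, y1, x2, y2, score, class]
    return np.array([[10.0, 10.0, 50.0, 80.0, 0.9, 0.0]], dtype=np.float32)

for frame_id, frame in enumerate(range(3)):  # stand-in for an iterable of images
    tracks = tracker.update(detector(frame), frame_id)
    # each returned row carries the box plus a persistent track id for drawing/counting
```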
36 changes: 21 additions & 15 deletions bytetracker/byte_tracker.py
@@ -1,22 +1,23 @@
 import numpy as np
-import torch

 from bytetracker import matching
 from bytetracker.basetrack import BaseTrack, TrackState
 from bytetracker.kalman_filter import KalmanFilter


 def xywh2xyxy(x):
     # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
-    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+    y = np.copy(x)
     y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
     y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
     y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
     y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
     return y


 def xyxy2xywh(x):
     # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
-    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+    y = np.copy(x)
     y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
     y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
     y[:, 2] = x[:, 2] - x[:, 0]  # width
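Both converters are now pure NumPy; the `torch.Tensor` branch is gone. A quick round-trip check of the center-size and corner conventions, assuming the two module-level functions above:

```python
import numpy as np
from bytetracker.byte_tracker import xywh2xyxy, xyxy2xywh

boxes = np.array([[50.0, 40.0, 20.0, 10.0]])   # one box: center (50, 40), size 20x10
corners = xywh2xyxy(boxes)                     # -> [[40., 35., 60., 45.]]
assert np.allclose(xyxy2xywh(corners), boxes)  # the two conversions invert each other
```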
@@ -28,7 +29,6 @@ class STrack(BaseTrack):
     shared_kalman = KalmanFilter()

     def __init__(self, tlwh, score, cls):
-
         # wait activate
         self._tlwh = np.asarray(tlwh, dtype=float)
         self.kalman_filter = None
@@ -53,7 +53,9 @@ def multi_predict(stracks):
         for i, st in enumerate(stracks):
             if st.state != TrackState.Tracked:
                 multi_mean[i][7] = 0
-        multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
+        multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(
+            multi_mean, multi_covariance
+        )
         for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
             stracks[i].mean = mean
             stracks[i].covariance = cov
@@ -98,7 +100,9 @@ def update(self, new_track, frame_id):
         self.cls = new_track.cls

         new_tlwh = new_track.tlwh
-        self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
+        self.mean, self.covariance = self.kalman_filter.update(
+            self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh)
+        )
         self.state = TrackState.Tracked
         self.is_activated = True
@@ -188,10 +192,6 @@ def update(self, dets, _):
         confs = dets[:, 4]
         clss = dets[:, 5]

-        classes = clss.numpy()
-        xyxys = xyxys.numpy()
-        confs = confs.numpy()
-
         remain_inds = confs > self.track_thresh
         inds_low = confs > 0.1
         inds_high = confs < self.track_thresh
@@ -204,8 +204,8 @@ def update(self, dets, _):
         scores_keep = confs[remain_inds]
         scores_second = confs[inds_second]

-        clss_keep = classes[remain_inds]
-        clss_second = classes[remain_inds]
+        clss_keep = clss[remain_inds]
+        clss_second = clss[remain_inds]

         if len(dets) > 0:
             """Detections"""
@@ -245,10 +245,14 @@ def update(self, dets, _):
         # association the untrack to the low score detections
         if len(dets_second) > 0:
             """Detections"""
-            detections_second = [STrack(xywh, s, c) for (xywh, s, c) in zip(dets_second, scores_second, clss_second)]
+            detections_second = [
+                STrack(xywh, s, c) for (xywh, s, c) in zip(dets_second, scores_second, clss_second)
+            ]
         else:
             detections_second = []
-        r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
+        r_tracked_stracks = [
+            strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked
+        ]
         dists = matching.iou_distance(r_tracked_stracks, detections_second)
         matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
         for itracked, idet in matches:
@@ -303,7 +307,9 @@ def update(self, dets, _):
         self.lost_stracks.extend(lost_stracks)
         self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
         self.removed_stracks.extend(removed_stracks)
-        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
+        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(
+            self.tracked_stracks, self.lost_stracks
+        )
         # get scores of lost tracks
         output_stracks = [track for track in self.tracked_stracks if track.is_activated]
         outputs = []
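Taken together, the `update` hunks above are ByteTrack's two-stage association: detections above `track_thresh` are matched to Kalman-predicted tracks first, and the 0.1-to-`track_thresh` leftovers get a second pass that rescues occluded or dim objects. A toy sketch of just the mask logic, now that scores are plain NumPy:

```python
import numpy as np

track_thresh = 0.45                      # value from bytetracker/config/byte_track.yaml
confs = np.array([0.9, 0.5, 0.3, 0.05])  # toy detection scores

remain_inds = confs > track_thresh                    # stage 1: high-confidence detections
inds_second = (confs > 0.1) & (confs < track_thresh)  # stage 2: low-confidence detections

print(np.nonzero(remain_inds)[0])  # [0 1] -> matched first
print(np.nonzero(inds_second)[0])  # [2]   -> second pass; 0.05 is discarded entirely
```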
4 changes: 2 additions & 2 deletions bytetracker/config/byte_track.yaml
@@ -1,5 +1,5 @@
 byte_track:
   track_thresh: 0.45
   track_buffer: 25
-  match_thresh: 0.8
-  frame_rate: 30
+  match_thresh: 0.8
+  frame_rate: 30
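The `match_thresh`/`frame_rate` pair is an apparent whitespace-only fix (pre-commit normalizing the end of the file). A sketch of consuming this config, assuming `BYTETracker` accepts the four keys as keyword arguments:

```python
import yaml
from bytetracker import BYTETracker

with open("bytetracker/config/byte_track.yaml") as f:
    cfg = yaml.safe_load(f)["byte_track"]

tracker = BYTETracker(**cfg)  # assumes the constructor mirrors the config keys one-to-one
```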
24 changes: 20 additions & 4 deletions bytetracker/kalman_filter.py
@@ -6,7 +6,17 @@
 freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
 function and used as Mahalanobis gating threshold.
 """
-chi2inv95 = {1: 3.8415, 2: 5.9915, 3: 7.8147, 4: 9.4877, 5: 11.070, 6: 12.592, 7: 14.067, 8: 15.507, 9: 16.919}
+chi2inv95 = {
+    1: 3.8415,
+    2: 5.9915,
+    3: 7.8147,
+    4: 9.4877,
+    5: 11.070,
+    6: 12.592,
+    7: 14.067,
+    8: 15.507,
+    9: 16.919,
+}


 class KalmanFilter(object):
@@ -110,7 +120,9 @@ def predict(self, mean, covariance):

         # mean = np.dot(self._motion_mat, mean)
         mean = np.dot(mean, self._motion_mat.T)
-        covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
+        covariance = (
+            np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
+        )

         return mean, covariance

@@ -213,7 +225,9 @@ def update(self, mean, covariance, measurement):
         innovation = measurement - projected_mean

         new_mean = mean + np.dot(innovation, kalman_gain.T)
-        new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T))
+        new_covariance = covariance - np.linalg.multi_dot(
+            (kalman_gain, projected_cov, kalman_gain.T)
+        )
         return new_mean, new_covariance

     def gating_distance(self, mean, covariance, measurements, only_position=False, metric="maha"):
@@ -251,7 +265,9 @@ def gating_distance(self, mean, covariance, measurements, only_position=False, metric="maha"):
             return np.sum(d * d, axis=1)
         elif metric == "maha":
             cholesky_factor = np.linalg.cholesky(covariance)
-            z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True)
+            z = scipy.linalg.solve_triangular(
+                cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True
+            )
             squared_maha = np.sum(z * z, axis=0)
             return squared_maha
         else:
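The reformatted `chi2inv95` table holds the 0.95 quantiles of the chi-square distribution; `gating_distance` compares squared Mahalanobis distances against them. The values can be re-derived with SciPy, which is already a dependency:

```python
from scipy.stats import chi2

# 0.95 quantile of chi-square with N degrees of freedom, N = 1..9,
# the same values hard-coded in chi2inv95 (e.g. N=4, the full
# [x, y, aspect, height] measurement, gives 9.4877)
for n in range(1, 10):
    print(n, float(chi2.ppf(0.95, df=n)))
```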
29 changes: 23 additions & 6 deletions bytetracker/matching.py
@@ -36,7 +36,11 @@ def _indices_to_matches(cost_matrix, indices, thresh):

 def linear_assignment(cost_matrix, thresh):
     if cost_matrix.size == 0:
-        return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
+        return (
+            np.empty((0, 2), dtype=int),
+            tuple(range(cost_matrix.shape[0])),
+            tuple(range(cost_matrix.shape[1])),
+        )
     matches, unmatched_a, unmatched_b = [], [], []
     cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
     for ix, mx in enumerate(x):
@@ -60,7 +64,10 @@ def ious(atlbrs, btlbrs):
     if ious.size == 0:
         return ious

-    ious = bbox_ious(np.ascontiguousarray(atlbrs, dtype=np.float32), np.ascontiguousarray(btlbrs, dtype=np.float32))
+    ious = bbox_ious(
+        np.ascontiguousarray(atlbrs, dtype=np.float32),
+        np.ascontiguousarray(btlbrs, dtype=np.float32),
+    )

     return ious

@@ -137,7 +144,9 @@ def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False):
     gating_threshold = kalman_filter.chi2inv95[gating_dim]
     measurements = np.asarray([det.to_xyah() for det in detections])
     for row, track in enumerate(tracks):
-        gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position)
+        gating_distance = kf.gating_distance(
+            track.mean, track.covariance, measurements, only_position
+        )
         cost_matrix[row, gating_distance > gating_threshold] = np.inf
     return cost_matrix

@@ -149,7 +158,9 @@ def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda
     gating_threshold = kalman_filter.chi2inv95[gating_dim]
     measurements = np.asarray([det.to_xyah() for det in detections])
     for row, track in enumerate(tracks):
-        gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position, metric="maha")
+        gating_distance = kf.gating_distance(
+            track.mean, track.covariance, measurements, only_position, metric="maha"
+        )
         cost_matrix[row, gating_distance > gating_threshold] = np.inf
         cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance
     return cost_matrix
@@ -195,12 +206,18 @@ def bbox_ious(boxes, query_boxes):
     overlaps = np.zeros((N, K), dtype=np.float32)

     for k in range(K):
-        box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1)
+        box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (
+            query_boxes[k, 3] - query_boxes[k, 1] + 1
+        )
         for n in range(N):
             iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1
             if iw > 0:
                 ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1
                 if ih > 0:
-                    ua = float((boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1) + box_area - iw * ih)
+                    ua = float(
+                        (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)
+                        + box_area
+                        - iw * ih
+                    )
                     overlaps[n, k] = iw * ih / ua
     return overlaps
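`bbox_ious` keeps the Pascal-VOC pixel convention, hence the `+ 1` on every width and height. Working one pair through by hand under that convention:

```python
import numpy as np

box = np.array([0, 0, 9, 9], dtype=np.float32)      # 10x10 box under the +1 convention
query = np.array([5, 5, 14, 14], dtype=np.float32)  # 10x10 box overlapping its corner

iw = min(box[2], query[2]) - max(box[0], query[0]) + 1  # 9 - 5 + 1 = 5
ih = min(box[3], query[3]) - max(box[1], query[1]) + 1  # 5
inter = iw * ih                                         # 25
union = 100.0 + 100.0 - inter                           # area sum minus intersection = 175
print(inter / union)                                    # ~0.1429, matching bbox_ious
```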
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,3 +1,3 @@
 lapx>=0.5.5
 scipy==1.9.3
-torch>=1.13
+torch>=1.13
