From bc75900ae8570080b82768464956269c0ccdd5eb Mon Sep 17 00:00:00 2001
From: TomDarmon
Date: Mon, 13 Nov 2023 15:26:35 +0100
Subject: [PATCH] feat: add versioning + pre-commit

---
 .github/PULL_REQUEST_TEMPLATE.md   | 14 +++++++++++
 .github/workflows/release.yaml     | 37 ++++++++++++++++++++++++++++++
 README.md                          |  4 ++--
 bytetracker/byte_tracker.py        | 36 +++++++++++++++++------------
 bytetracker/config/byte_track.yaml |  4 ++--
 bytetracker/kalman_filter.py       | 24 +++++++++++++++----
 bytetracker/matching.py            | 29 ++++++++++++++++++-----
 requirements.txt                   |  2 +-
 8 files changed, 120 insertions(+), 30 deletions(-)
 create mode 100644 .github/PULL_REQUEST_TEMPLATE.md
 create mode 100644 .github/workflows/release.yaml

diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..5cd2e48
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,14 @@
+## Description of the goal of the PR
+
+
+## Changes this PR introduces (fill it before implementation)
+
+- [ ]
+
+## Checklist before requesting a review
+- [ ] The CI pipeline passes
+- [ ] I have typed my code
+- [ ] I have created / updated the docstrings
+- [ ] I have updated the README, if relevant
+- [ ] I have updated the requirements, if relevant
+- [ ] I have tested my code
\ No newline at end of file
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
new file mode 100644
index 0000000..0d84c66
--- /dev/null
+++ b/.github/workflows/release.yaml
@@ -0,0 +1,37 @@
+# This workflow triggers the CI, updates the version, and uploads the release to GitHub when a push is made to the 'main' branch.
+#
+# Workflow Steps:
+#
+# 1. CI is triggered using the CI workflow defined in .github/workflows/ci.yaml
+# 2. If it succeeds, the version is updated using Python Semantic Release
+# 3. The release is uploaded to GitHub (same step and GitHub action)
+
+name: CI and Release on main
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  CI:
+    uses: ./.github/workflows/ci.yaml
+
+  Release:
+    runs-on: ubuntu-latest
+    concurrency: Release
+    needs: CI
+    permissions:
+      id-token: write
+      contents: write
+
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          token: ${{ secrets.GH_TOKEN }}
+
+      - name: Python Semantic Release
+        uses: python-semantic-release/python-semantic-release@master
+        with:
+          github_token: ${{ secrets.GH_TOKEN }}
\ No newline at end of file
diff --git a/README.md b/README.md
index 5f8ed9d..3b7e621 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-  ByteTrack-Pip: Packaged version of the ByteTrack repository 
+  ByteTrack-Pip: Packaged version of the ByteTrack repository
@@ -19,7 +19,7 @@ This repo is a packaged version of the [ByteTrack](https://github.com/ifzhang/By
 ```bash
 pip install bytetracker
 ```
-### Detection Model + ByteTrack 
+### Detection Model + ByteTrack
 ```python
 from bytetracker import BYTETracker
 ```
diff --git a/bytetracker/byte_tracker.py b/bytetracker/byte_tracker.py
index 4807491..e43ba11 100644
--- a/bytetracker/byte_tracker.py
+++ b/bytetracker/byte_tracker.py
@@ -1,5 +1,5 @@
 import numpy as np
-import torch
+
 from bytetracker import matching
 from bytetracker.basetrack import BaseTrack, TrackState
 from bytetracker.kalman_filter import KalmanFilter
@@ -7,16 +7,17 @@
 
 def xywh2xyxy(x):
     # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
-    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+    y = np.copy(x)
     y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
     y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
     y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
     y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
     return y
 
+
 def xyxy2xywh(x):
     # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
-    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+    y = np.copy(x)
     y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
     y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
     y[:, 2] = x[:, 2] - x[:, 0]  # width
@@ -28,7 +29,6 @@ class STrack(BaseTrack):
     shared_kalman = KalmanFilter()
 
     def __init__(self, tlwh, score, cls):
-        # wait activate
        self._tlwh = np.asarray(tlwh, dtype=float)
        self.kalman_filter = None
 
@@ -53,7 +53,9 @@ def multi_predict(stracks):
             for i, st in enumerate(stracks):
                 if st.state != TrackState.Tracked:
                     multi_mean[i][7] = 0
-            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
+            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(
+                multi_mean, multi_covariance
+            )
             for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                 stracks[i].mean = mean
                 stracks[i].covariance = cov
@@ -98,7 +100,9 @@ def update(self, new_track, frame_id):
         self.cls = new_track.cls
 
         new_tlwh = new_track.tlwh
-        self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
+        self.mean, self.covariance = self.kalman_filter.update(
+            self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh)
+        )
         self.state = TrackState.Tracked
         self.is_activated = True
 
@@ -188,10 +192,6 @@ def update(self, dets, _):
         confs = dets[:, 4]
         clss = dets[:, 5]
 
-        classes = clss.numpy()
-        xyxys = xyxys.numpy()
-        confs = confs.numpy()
-
         remain_inds = confs > self.track_thresh
         inds_low = confs > 0.1
         inds_high = confs < self.track_thresh
@@ -204,8 +204,8 @@ def update(self, dets, _):
         scores_keep = confs[remain_inds]
         scores_second = confs[inds_second]
 
-        clss_keep = classes[remain_inds]
-        clss_second = classes[remain_inds]
+        clss_keep = clss[remain_inds]
+        clss_second = clss[remain_inds]
 
         if len(dets) > 0:
             """Detections"""
@@ -245,10 +245,14 @@ def update(self, dets, _):
         # association the untrack to the low score detections
         if len(dets_second) > 0:
             """Detections"""
-            detections_second = [STrack(xywh, s, c) for (xywh, s, c) in zip(dets_second, scores_second, clss_second)]
+            detections_second = [
+                STrack(xywh, s, c) for (xywh, s, c) in zip(dets_second, scores_second, clss_second)
+            ]
         else:
             detections_second = []
-        r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
+        r_tracked_stracks = [
+            strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked
+        ]
         dists = matching.iou_distance(r_tracked_stracks, detections_second)
         matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
         for itracked, idet in matches:
@@ -303,7 +307,9 @@ def update(self, dets, _):
         self.lost_stracks.extend(lost_stracks)
         self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
         self.removed_stracks.extend(removed_stracks)
-        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
+        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(
+            self.tracked_stracks, self.lost_stracks
+        )
         # get scores of lost tracks
         output_stracks = [track for track in self.tracked_stracks if track.is_activated]
         outputs = []
diff --git a/bytetracker/config/byte_track.yaml b/bytetracker/config/byte_track.yaml
index b4c7478..accb795 100644
--- a/bytetracker/config/byte_track.yaml
+++ b/bytetracker/config/byte_track.yaml
@@ -1,5 +1,5 @@
 byte_track:
   track_thresh: 0.45
   track_buffer: 25
-  match_thresh: 0.8 
-  frame_rate: 30
\ No newline at end of file
+  match_thresh: 0.8
+  frame_rate: 30
diff --git a/bytetracker/kalman_filter.py b/bytetracker/kalman_filter.py
index 1f270a6..f3b9182 100644
--- a/bytetracker/kalman_filter.py
+++ b/bytetracker/kalman_filter.py
@@ -6,7 +6,17 @@
 freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
 function and used as Mahalanobis gating threshold.
 """
-chi2inv95 = {1: 3.8415, 2: 5.9915, 3: 7.8147, 4: 9.4877, 5: 11.070, 6: 12.592, 7: 14.067, 8: 15.507, 9: 16.919}
+chi2inv95 = {
+    1: 3.8415,
+    2: 5.9915,
+    3: 7.8147,
+    4: 9.4877,
+    5: 11.070,
+    6: 12.592,
+    7: 14.067,
+    8: 15.507,
+    9: 16.919,
+}
 
 
 class KalmanFilter(object):
@@ -110,7 +120,9 @@ def predict(self, mean, covariance):
 
         # mean = np.dot(self._motion_mat, mean)
         mean = np.dot(mean, self._motion_mat.T)
-        covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
+        covariance = (
+            np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
+        )
 
         return mean, covariance
 
@@ -213,7 +225,9 @@ def update(self, mean, covariance, measurement):
         innovation = measurement - projected_mean
 
         new_mean = mean + np.dot(innovation, kalman_gain.T)
-        new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T))
+        new_covariance = covariance - np.linalg.multi_dot(
+            (kalman_gain, projected_cov, kalman_gain.T)
+        )
         return new_mean, new_covariance
 
     def gating_distance(self, mean, covariance, measurements, only_position=False, metric="maha"):
@@ -251,7 +265,9 @@ def gating_distance(self, mean, covariance, measurements, only_position=False, m
             return np.sum(d * d, axis=1)
         elif metric == "maha":
             cholesky_factor = np.linalg.cholesky(covariance)
-            z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True)
+            z = scipy.linalg.solve_triangular(
+                cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True
+            )
             squared_maha = np.sum(z * z, axis=0)
             return squared_maha
         else:
diff --git a/bytetracker/matching.py b/bytetracker/matching.py
index 6a784de..d7384c9 100644
--- a/bytetracker/matching.py
+++ b/bytetracker/matching.py
@@ -36,7 +36,11 @@ def _indices_to_matches(cost_matrix, indices, thresh):
 
 def linear_assignment(cost_matrix, thresh):
     if cost_matrix.size == 0:
-        return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
+        return (
+            np.empty((0, 2), dtype=int),
+            tuple(range(cost_matrix.shape[0])),
+            tuple(range(cost_matrix.shape[1])),
+        )
     matches, unmatched_a, unmatched_b = [], [], []
     cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
     for ix, mx in enumerate(x):
@@ -60,7 +64,10 @@ def ious(atlbrs, btlbrs):
     if ious.size == 0:
         return ious
 
-    ious = bbox_ious(np.ascontiguousarray(atlbrs, dtype=np.float32), np.ascontiguousarray(btlbrs, dtype=np.float32))
+    ious = bbox_ious(
+        np.ascontiguousarray(atlbrs, dtype=np.float32),
+        np.ascontiguousarray(btlbrs, dtype=np.float32),
+    )
 
     return ious
 
@@ -137,7 +144,9 @@ def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False):
     gating_threshold = kalman_filter.chi2inv95[gating_dim]
     measurements = np.asarray([det.to_xyah() for det in detections])
     for row, track in enumerate(tracks):
-        gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position)
+        gating_distance = kf.gating_distance(
+            track.mean, track.covariance, measurements, only_position
+        )
         cost_matrix[row, gating_distance > gating_threshold] = np.inf
     return cost_matrix
 
@@ -149,7 +158,9 @@ def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda
     gating_threshold = kalman_filter.chi2inv95[gating_dim]
     measurements = np.asarray([det.to_xyah() for det in detections])
     for row, track in enumerate(tracks):
-        gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position, metric="maha")
+        gating_distance = kf.gating_distance(
+            track.mean, track.covariance, measurements, only_position, metric="maha"
+        )
         cost_matrix[row, gating_distance > gating_threshold] = np.inf
         cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance
     return cost_matrix
@@ -195,12 +206,18 @@ def bbox_ious(boxes, query_boxes):
     overlaps = np.zeros((N, K), dtype=np.float32)
 
     for k in range(K):
-        box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1)
+        box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (
+            query_boxes[k, 3] - query_boxes[k, 1] + 1
+        )
         for n in range(N):
             iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1
             if iw > 0:
                 ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1
                 if ih > 0:
-                    ua = float((boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1) + box_area - iw * ih)
+                    ua = float(
+                        (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)
+                        + box_area
+                        - iw * ih
+                    )
                     overlaps[n, k] = iw * ih / ua
     return overlaps
 
diff --git a/requirements.txt b/requirements.txt
index f671661..4f47e74 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
 lapx>=0.5.5
 scipy==1.9.3
-torch>=1.13
\ No newline at end of file
+torch>=1.13
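
Note on usage after this change: with the `torch` import and the `.numpy()` conversions removed from `bytetracker/byte_tracker.py`, `BYTETracker.update()` operates directly on a NumPy detection array laid out as `[x1, y1, x2, y2, confidence, class]` per row. A minimal sketch of the intended call pattern follows; the constructor keywords mirror `bytetracker/config/byte_track.yaml` and, like treating the second `update()` argument as the frame index, are assumptions rather than something this patch defines.

```python
import numpy as np

from bytetracker import BYTETracker

# Assumed constructor keywords: the names mirror bytetracker/config/byte_track.yaml
# and may not match the actual BYTETracker signature exactly.
tracker = BYTETracker(track_thresh=0.45, track_buffer=25, match_thresh=0.8, frame_rate=30)

# After this patch, detections are a plain NumPy array (not a torch.Tensor):
# one row per detection, [x1, y1, x2, y2, confidence, class].
dets = np.array([[100.0, 120.0, 180.0, 260.0, 0.87, 0.0]])

# Assumed call pattern: the second positional argument (named `_` in update())
# is treated here as the frame index.
online_targets = tracker.update(dets, 1)
```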