Add multiple directions specification
ozora-ogino committed Dec 3, 2021
1 parent 870d8c7 commit 82482e5
Showing 8 changed files with 53 additions and 29 deletions.
10 changes: 8 additions & 2 deletions README.md
@@ -3,7 +3,7 @@
</h1>

<div align="center">
<img src="./outputs/testdata_yolov5l-fp16.gif" width="60%">
<img src="./outputs/example_yolov5l.gif" width="60%">
</div>

The motivation of TensorFlow Lite Human Tracking is to develop a person tracking system for edge cameras.
@@ -14,7 +14,7 @@ To track and detect people over frames, DeepSORT is adopted.
For details about DeepSORT, refer to [this great article](https://medium.com/augmented-startups/deepsort-deep-learning-applied-to-object-tracking-924f59f99104).


Currently [YOLOv5](https://github.com/ultralytics/yolov5) models are supported for object detection model.
Currently [YOLOv5](https://github.com/ultralytics/yolov5) models are supported for object detection.
To get YOLOv5 tflite model, see [`models/README.md`](./models/README.md)

## <div align="center">Quick Start Example</div>
@@ -23,6 +23,12 @@ To get YOLOv5 tflite model, see [`models/README.md`](./models/README.md)
git clone git@github.com:ozora-ogino/tflite-human-tracking.git
cd tflite-human-tracking
python main.py --src ./data/<YOUR_VIDEO_FILE>.mp4 --model ./models/<YOLOV5_MODEL>.tflite

# Set directions.
# Each direction value can be one of 'bottom', 'top', 'right', 'left', or None.
python src/main.py --src ./data/trim10s.mp4 \
--model ./models/yolov5s-fp16.tflite \
--directions="{'total': None, 'inside': 'bottom', 'outside': 'top'}"
```
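The `--directions` value is a Python dict literal that maps a counter name (e.g. `total`, `inside`, `outside`) to a direction. As a minimal sketch of how such a string can be parsed and validated — not part of this commit, which parses it with `type=eval` in `src/main.py` — the hypothetical helper below uses `ast.literal_eval`:

```python
# Hypothetical helper, not from the repository: parse and validate a
# --directions string such as "{'total': None, 'inside': 'bottom', 'outside': 'top'}".
import ast

ALLOWED_DIRECTIONS = {"bottom", "top", "right", "left", None}


def parse_directions(arg: str) -> dict:
    """Turn a dict-literal string into a {counter_name: direction} mapping."""
    directions = ast.literal_eval(arg)
    if not isinstance(directions, dict):
        raise ValueError("--directions must be a dict literal")
    for key, value in directions.items():
        if value not in ALLOWED_DIRECTIONS:
            raise ValueError(f"Unknown direction {value!r} for counter {key!r}")
    return directions


print(parse_directions("{'total': None, 'inside': 'bottom', 'outside': 'top'}"))
# -> {'total': None, 'inside': 'bottom', 'outside': 'top'}
```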

### Docker
Empty file modified build_image.sh
100644 → 100755
Empty file.
2 changes: 1 addition & 1 deletion data/video2img.py
@@ -39,6 +39,6 @@ def _main(video: str, save_dir: str, limit_frames: int) -> None:
parser = ArgumentParser()
parser.add_argument("--video", required=True)
parser.add_argument("--save-dir", default="./data/frames/")
parser.add_argument("--limit-frames", default=300, type=int)
parser.add_argument("--limit-frames", default=800, type=int)
args = parser.parse_args()
_main(**vars(args))
2 changes: 1 addition & 1 deletion outputs/.gitignore
@@ -1,3 +1,3 @@
**
!.gitignore
!testdata_yolov5l-fp16.gif
!example_yolov5l.gif
Binary file removed outputs/testdata_yolov5l-fp16.gif
Binary file not shown.
Empty file modified run.sh
100644 → 100755
Empty file.
10 changes: 6 additions & 4 deletions src/main.py
@@ -6,6 +6,7 @@
import argparse
import os
import time
from typing import Dict, Tuple

import cv2
import numpy as np
@@ -58,7 +59,7 @@ def main(
video_fmt: str,
confidence: float,
iou_threshold: float,
direction: str,
directions: Dict[str, Tuple[bool]],
):
"""Track human objects and count the number of human.
@@ -74,8 +75,8 @@

# The line to count.
border = [(0, 500), (1920, 500)]
direction = direction_config.get(direction)
tracker = Tracker(border, direction)
directions = {key: direction_config.get(d_str) for key, d_str in directions.items()}
tracker = Tracker(border, directions)
detect = Detect(model, confidence)
stream = VideoStream(src)
writer = None
@@ -132,6 +133,7 @@ def main(
parser.add_argument("--video-fmt", help="Format of output video file.", choices=["mp4", "avi"], default="mp4")
parser.add_argument("--confidence", type=float, default=0.2, help="Confidence threshold.")
parser.add_argument("--iou-threshold", type=float, default=0.2, help="IoU threshold for NMS.")
parser.add_argument("--direction", default=None, choices=direction_config.keys())
parser.add_argument("--directions", default={"total": None}, type=eval, help="Directions")

args = vars(parser.parse_args())
main(**args)
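In `main()`, each direction name is looked up in `direction_config` before the mapping is handed to `Tracker`. The contents of `direction_config` are not shown in this diff; purely as a hypothetical illustration, it could map each name to a flag pair consumed by `check_direction`:

```python
# Hypothetical illustration only: the real direction_config is defined
# elsewhere in the repository and is not shown in this diff. Consistent with
# the Dict[str, Tuple[bool]] annotation above, assume each direction name maps
# to a pair of flags understood by check_direction, and None means
# "count every border crossing regardless of direction".
direction_config = {
    "bottom": (True, True),   # vertical movement, towards the bottom
    "top": (True, False),     # vertical movement, towards the top
    "right": (False, True),   # horizontal movement, towards the right
    "left": (False, False),   # horizontal movement, towards the left
}

# With --directions="{'total': None, 'inside': 'bottom', 'outside': 'top'}",
# the comprehension in main() would then produce:
directions = {key: direction_config.get(d_str) for key, d_str in
              {"total": None, "inside": "bottom", "outside": "top"}.items()}
# -> {'total': None, 'inside': (True, True), 'outside': (True, False)}
```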
58 changes: 37 additions & 21 deletions src/tracker.py
@@ -16,7 +16,7 @@ class Tracker(object):
def __init__(
self,
border: List[Tuple[int]],
direction: Tuple[bool],
directions: Tuple[bool],
count_callback: Optional[Callable] = None,
):
"""Constructor of Tracker.
@@ -31,22 +31,33 @@ def __init__(
self.count_callback = count_callback
self.memory = {}
self.counter = 0
self.direction = direction

self.W, self.H = None, None
self.counter = {key: 0 for key in directions.keys()}
self.directions = directions

np.random.seed(2021)
self.COLORS = np.random.randint(0, 255, size=(200, 3), dtype="uint8")

def _is_count(self, center, center_prev, border) -> bool:
def _is_count(
self,
center: Tuple[int],
center_prev: Tuple[int],
border: List[Tuple[int]],
key: str,
) -> bool:
"""Check whether count or not.
1. check_direction: Check the direction of human movement.
If direction is not specified, return True.
2. is_intersect: Check whether the border and the human movement intersect.
Args:
center(Tuple[int]): Current center position.
center_prev(Tuple[int]): Previous center position.
border(List[Tuple[int]]): Border.
key(str): "inside", "outside" or "total".
"""

return check_direction(center, center_prev, self.direction) and is_intersect(
return check_direction(center, center_prev, self.directions[key]) and is_intersect(
center, center_prev, border[0], border[1]
)

@@ -93,11 +104,15 @@ def update(self, frame: np.ndarray, dets: np.ndarray) -> np.ndarray:
# Draw the motion of the bounding box.
cv2.line(frame, center, center_prev, color, 3)

if self._is_count(center, center_prev, self.border):
self.counter += 1
# Execute callback.
if self.count_callback:
self.count_callback(self.counter)
callback = False
for key in self.directions.keys():
if self._is_count(center, center_prev, self.border, key):
self.counter[key] += 1
callback = True

# Execute callback.
if self.count_callback and callback:
self.count_callback(self.counter)

# Put ID on the box.
cv2.putText(
@@ -112,14 +127,15 @@

# Draw border.
cv2.line(frame, self.border[0], self.border[1], (10, 255, 0), 3)
# Put counter in top right corner.
cv2.putText(
frame,
str(self.counter),
(100, 200),
cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
5.0,
(10, 255, 0),
3,
)
# Put counter in the top left corner.
for i, (key, count) in enumerate(self.counter.items()):
cv2.putText(
frame,
f"{key}: {count}",
(30, 30 + 80 * (i + 1)),
cv2.FONT_HERSHEY_SIMPLEX,
2.0,
(10, 255, 0),
5,
)
return frame
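`Tracker.update` now increments one counter per matching direction key and passes the whole counter dict to `count_callback`. A minimal example callback (not from the repository) that consumes the per-key counts:

```python
# Minimal example, not from the repository: a count_callback that receives the
# per-key counter dict introduced by this commit, e.g. {"total": 3, "inside": 2}.
def log_counts(counter: dict) -> None:
    summary = ", ".join(f"{key}={count}" for key, count in counter.items())
    print(f"[tracker] border crossings: {summary}")


# Hypothetical wiring, mirroring the border used in main() and the README
# example; the directions values are whatever direction_config.get() returns.
# tracker = Tracker(
#     border=[(0, 500), (1920, 500)],
#     directions={"total": None},
#     count_callback=log_counts,
# )
```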
