diff --git a/samples/bucket_test.py b/samples/bucket_test.py
deleted file mode 100644
index 0e4ef4d..0000000
--- a/samples/bucket_test.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from src.modules.imaging.bucket_detector import BucketDetector
-from src.modules.imaging.camera import DebugCameraFromDir
-from src.modules.imaging.location import DebugLocationProvider
-# from src.modules.imaging.analysis import ImageAnalysisDelegate
-from src.modules.imaging.debug_analysis import DebugImageAnalysisDelegate
-
-
-
-cam = DebugCameraFromDir("images")
-det = BucketDetector("samples/models/n640.pt")
-location = DebugLocationProvider()
-analysis = DebugImageAnalysisDelegate(det, cam, location)
-# analysis = ImageAnalysisDelegate(det, cam, location)
-
-def test(a1, a2):
-    if a1 != None and a2 != None:
-        print(a1)
-        print(a2)
-
-analysis.subscribe(test)
-
-analysis.start()
-
diff --git a/samples/bucket_test_simple.py b/samples/bucket_test_simple.py
deleted file mode 100644
index 892c8a9..0000000
--- a/samples/bucket_test_simple.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from src.modules.imaging.bucket_detector import BucketDetector
-from src.modules.imaging.camera import DebugCameraFromDir, RPiCamera, DebugCamera
-from src.modules.imaging.location import DebugLocationProvider
-from src.modules.imaging.analysis import ImageAnalysisDelegate
-
-from ultralytics import YOLO
-import os
-
-import cv2
-from PIL import Image
-import time
-
-cam = RPiCamera(0)
-model_path = 'samples/models'
-models = os.listdir(model_path)
-models = ["best.pt"]
-for file in models:
-    model = YOLO(os.path.join(model_path, file))
-    i = len(os.listdir("photos"))
-    while True:
-        image = cam.capture()
-        image.save(f"photos/{i}.png")
-        results = model(image)
-
-        result = results[0]  # because one image
-        a = result.plot()
-        # b = Image.fromarray(a)
-        # b.save(f"results/{i}.png")
-        #
-        cv2.imshow(f'Result for model: {file}', a)
-        cv2.waitKey(0)
-        cv2.destroyAllWindows()
-        time.sleep(0.5)
-        i += 1
-        # boxes = result.boxes
-
-        # if boxes is not None and len(boxes) > 0:
-        #     best_box = boxes[boxes.conf.argmax()]
-        #     (x1, y1, x2, y2) = best_box.xyxy[0].tolist()  # box [x1, y1, x2, y2]
-        #     conf = best_box.conf.item()  # confidence threshold
-        #     if conf < 0.5:
-        #         print("not confident")
-        #     else:
-        #         print("confident")
-        # else:
-        #     print("no bounding box")
-
diff --git a/samples/emu_connection.py b/samples/emu_connection.py
index f47ca3c..c490034 100644
--- a/samples/emu_connection.py
+++ b/samples/emu_connection.py
@@ -1,33 +1,20 @@
 from src.modules.emu import Emu
 import time
-import json
 
 emu = Emu("res")
 
-# def onConnect():
-#     loadCurrent = {
-#         "type": "load",
-#         "uavStatus": {
-#             "connection": "no",
-#             "mode": "test",
-#             "imageCount": "2",
-#             "timeSinceMessage": "3"
-#         },
-#         "imageName": "res/sample1.jpg"
-#     }
-#     emu.send_msg(json.dumps(loadCurrent))
-
-
-# emu.set_on_connect(onConnect)
 emu.start_comms()
 
 time.sleep(2)
 
 # test different logs
 for i in range(6):
     print(f"sending log {i}")
-    if i % 3 == 0: severity = "normal"
-    elif i % 3 == 1: severity = "warning"
-    else: severity = "error"
+    if i % 3 == 0:
+        severity = "normal"
+    elif i % 3 == 1:
+        severity = "warning"
+    else:
+        severity = "error"
     emu.send_log(f"log text {i}", severity)
     time.sleep(1)
diff --git a/samples/geofence_test.py b/samples/geofence.py
similarity index 99%
rename from samples/geofence_test.py
rename to samples/geofence.py
index 090f34a..f8dc086 100644
--- a/samples/geofence_test.py
+++ b/samples/geofence.py
@@ -37,5 +37,3 @@
 print(lander.geofence_check(point5))
 print(lander.geofence_check(point6))
 print(lander.geofence_check(point7))
-
-
diff --git a/samples/img_test.py b/samples/img_analysis.py
similarity index 99%
rename from samples/img_test.py
rename to samples/img_analysis.py
index c202da2..b3040a4 100644
--- a/samples/img_test.py
+++ b/samples/img_analysis.py
@@ -13,7 +13,6 @@ def test(img, _):
     print("Image taken")
 
 
-
 analysis = ImageAnalysisDelegate(detector, camera, location)
 analysis.subscribe(test)
 
diff --git a/samples/kml_mock.py b/samples/kml_mock.py
deleted file mode 100644
index 904907d..0000000
--- a/samples/kml_mock.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from src.modules.imaging.kml import KMLGenerator, LatLong
-
-
-kml = KMLGenerator()
-
-hotspot_1 = LatLong(1,1)
-hotspot_2 = LatLong(1,2)
-hotspot_3 = LatLong(0,0)
-
-kml.push(hotspot_1)
-kml.push(hotspot_2)
-kml.push(hotspot_3)
-
-print(kml.read(-1).latitude, kml.read(-1).longitude)
-kml.pop()
-
-print(kml.read(-1).latitude, kml.read(-1).longitude)
-
-kml.set_source("Crashed Drone", LatLong(24, 24))
-
-kml.generate("out.kml")
diff --git a/samples/oakd.py b/samples/oakd.py
index ba51d4d..1f3cd4e 100644
--- a/samples/oakd.py
+++ b/samples/oakd.py
@@ -9,9 +9,11 @@ emu = Emu("tmp")
 
 i = 0
 
+
 def print_conn():
     print("connecton made")
 
+
 emu.set_on_connect(print_conn)
 
 latest_capture: DepthCapture | None = None
@@ -23,6 +25,7 @@ def print_conn():
 camera_thread = threading.Thread(target=camera.start(), daemon=True)
 camera_thread.start()
 
+
 def send_img(message):
     global latest_capture, i
 
@@ -38,6 +41,7 @@ def send_img(message):
 
     i += 1
 
+
 def measure(message):
     global latest_capture
 
@@ -53,7 +57,7 @@ def measure(message):
     if latest_capture is not None:
         distance = latest_capture.distance_between_points(p1["x"], p1["y"], p2["x"], p2["y"])
         send = {
-            "type": "distance", 
+            "type": "distance",
             "message": distance
         }
         emu.send_msg(json.dumps(send))
diff --git a/samples/test_nn.py b/samples/test_nn.py
deleted file mode 100644
index e52ba78..0000000
--- a/samples/test_nn.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-benchmark the speed of different trained yolo models
-"""
-
-from ultralytics import YOLO
-import os
-import time
-import math
-import json
-
-
-class ModelStats:
-    def __init__(self):
-        self.n = 0
-        self.mean = 0.0
-        self.M2 = 0.0
-    def update(self, t):
-        self.n += 1
-        delta = t-self.mean
-        self.mean += delta / self.n
-        delta2 = t - self.mean
-        self.M2 += delta * delta2
-    def get_mean(self):
-        return self.mean
-
-    def get_stddev(self):
-        return math.sqrt(self.M2 / self.n) if self.n > 1 else 0.0
-
-
-models = os.listdir("models")
-
-images = os.listdir("images")
-images = [ os.path.join("images", file_name) for file_name in images ]
-
-stats = {}
-
-for model_name in models:
-    print(model_name)
-    model = YOLO(os.path.join("models", model_name))
-
-    model_stats = ModelStats()
-
-    for img_name in images:
-        start = time.time()
-        results = model(img_name)
-        end = time.time()
-        model_stats.update(end - start)
-    print(f"\tmean: {model_stats.get_mean()}")
-    stats[model_name] = {"mean": model_stats.get_mean(), "stddev": model_stats.get_stddev()}
-
-
-with open("results.json", "w") as f:
-    json.dump(stats, f)
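
Note: the ModelStats class deleted above tracks a running mean and standard deviation with Welford's online algorithm, which needs only one pass and O(1) memory rather than storing every timing sample. For reference, a minimal standalone sketch of the same update rule (the timing values below are made up):

    import math

    class RunningStats:
        def __init__(self):
            self.n = 0
            self.mean = 0.0
            self.m2 = 0.0  # running sum of squared deviations

        def update(self, x: float):
            self.n += 1
            delta = x - self.mean               # deviation from the old mean
            self.mean += delta / self.n         # fold the new sample into the mean
            self.m2 += delta * (x - self.mean)  # second factor uses the updated mean

        def stddev(self) -> float:
            # population standard deviation, matching the deleted code's M2 / n
            return math.sqrt(self.m2 / self.n) if self.n > 1 else 0.0

    stats = RunningStats()
    for t in [0.031, 0.028, 0.035]:  # hypothetical per-image inference times
        stats.update(t)
    print(stats.mean, stats.stddev())
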
diff --git a/scripts/test.sh b/scripts/test.sh
index 189951e..a06cd8c 100755
--- a/scripts/test.sh
+++ b/scripts/test.sh
@@ -1,3 +1,3 @@
 #!/usr/bin/env sh
 
-PYTHONPATH=".:dep/labeller" pytest
+PYTHONPATH="." pytest
diff --git a/src/modules/emu/__init__.py b/src/modules/emu/__init__.py
index 953fd9b..e69de29 100644
--- a/src/modules/emu/__init__.py
+++ b/src/modules/emu/__init__.py
@@ -1 +0,0 @@
-from .emu import *
diff --git a/src/modules/emu/emu.py b/src/modules/emu/emu.py
index b808e72..ecaa29e 100644
--- a/src/modules/emu/emu.py
+++ b/src/modules/emu/emu.py
@@ -47,7 +47,7 @@ def send_image(self, path: str):
         }
         self._send_queue.put(json.dumps(content))
 
-    def send_log(self, message: str, severity: str="normal"):
+    def send_log(self, message: str, severity: str = "normal"):
         """
         sends a log message to Emu
         message: string of flog
@@ -87,7 +87,6 @@ async def producer_handler(self, ws):
         """
         handles sending messages to the client
         """
-        event_loop = asyncio.get_running_loop()
 
         while not ws.closed:
             message = await asyncio.to_thread(self._send_queue.get)
@@ -101,7 +100,7 @@ async def consumer_handler(self, ws):
             elif msg.type == aiohttp.WSMsgType.ERROR:
                 print("WebSocket error:", ws.exception())
 
-    
+
     async def handle_websocket(self, request):
         ws = web.WebSocketResponse()
         await ws.prepare(request)
@@ -122,5 +121,5 @@ async def handle_websocket(self, request):
         print('websocket connection closed')
         self._is_connected = False
 
-    
+
         return ws
diff --git a/src/modules/imaging/analysis.py b/src/modules/imaging/analysis.py
index 94ad15a..6a461b9 100644
--- a/src/modules/imaging/analysis.py
+++ b/src/modules/imaging/analysis.py
@@ -1,4 +1,4 @@
-from typing import Callable, Optional, List, Callable, Any
+from typing import Callable, List, Callable, Any
 
 import threading
 # from multiprocessing import Process
@@ -46,9 +46,9 @@ class ImageAnalysisDelegate:
     def __init__(self,
                  detector: BaseDetector,
                  camera: CameraProvider,
-                 location_provider: LocationProvider = None,
-                 navigation_provider: Navigator = None,
-                 debugger: Optional[ImageAnalysisDebugger] = None):
+                 location_provider: LocationProvider | None = None,
+                 debugger: ImageAnalysisDebugger | None = None,
+                 navigation_provider: Navigator | None = None):
         self.detector = detector
         self.camera = camera
         self.debugger = debugger
@@ -59,9 +59,9 @@ def __init__(self,
         self.location_provider = location_provider
         self.navigation_provider = navigation_provider
 
-        self.subscribers: List[Callable[[Image.Image, float, float], Any]] = []
+        self.subscribers: List[Callable[[Image.Image, tuple[float, float] | None], Any]] = []
         self.camera_attributes = CameraAttributes()
 
-        self.thread = None
+        self.loop = True
 
     def get_inference(self, bounding_box: BoundingBox) -> Inference:
@@ -75,6 +75,14 @@
         inference = Inference(bounding_box, altitude)
         return inference
 
+    def _analysis_loop(self):
+        """
+        Indefinitely run image analysis. This should be run in another thread;
+        use `start()` to do so.
+        """
+        while self.loop:
+            self._analyze_image()
+
     def start(self):
         """
         Will start the image analysis process in another thread.
@@ -87,8 +95,9 @@
         # Use `threading` to start `self._analysis_loop` in another thread.
 
     def stop(self):
-        self.loop = False
-        self.thread.join()
+        if self.thread is not None:
+            self.loop = False
+            self.thread.join()
 
     def _analyze_image(self):
         """
@@ -114,14 +123,6 @@
             else:
                 subscriber(im, None)
 
-    def _analysis_loop(self):
-        """
-        Indefinitely run image analysis. This should be run in another thread;
-        use `start()` to do so.
-        """
-        while self.loop:
-            self._analyze_image()
-
     def subscribe(self, callback: Callable):
         """
         Subscribe to image analysis updates. For example:
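
Note: after this refactor, subscribers receive the captured image plus either an (x, y) coordinate pair or None, instead of two separate floats, and stop() only joins the worker thread when one exists. A rough sketch of how a caller might drive the new API, using classes named in this diff; the DebugCamera constructor argument is an assumption, and the model path is copied from the deleted bucket_test.py sample:

    from PIL import Image

    from src.modules.imaging.analysis import ImageAnalysisDelegate
    from src.modules.imaging.bucket_detector import BucketDetector
    from src.modules.imaging.camera import DebugCamera
    from src.modules.imaging.location import DebugLocationProvider

    def on_result(img: Image.Image, coords: tuple[float, float] | None):
        # coords is None whenever no bounding box was detected in this frame
        if coords is not None:
            x, y = coords
            print(f"object located at ({x}, {y})")

    detector = BucketDetector("samples/models/n640.pt")
    camera = DebugCamera("res/sample1.jpg")  # constructor argument assumed
    analysis = ImageAnalysisDelegate(detector, camera, DebugLocationProvider())
    analysis.subscribe(on_result)
    analysis.start()  # runs _analysis_loop in a background thread
    # ... mission logic ...
    analysis.stop()   # clears self.loop, then joins the recorded thread
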
diff --git a/src/modules/imaging/bucket_detector.py b/src/modules/imaging/bucket_detector.py
index 7aa3b8e..d6b1660 100644
--- a/src/modules/imaging/bucket_detector.py
+++ b/src/modules/imaging/bucket_detector.py
@@ -2,7 +2,7 @@
 
 from PIL import Image
 
-from .detector import Vec2, BoundingBox, BaseDetector 
+from .detector import Vec2, BoundingBox, BaseDetector
 
 from ultralytics import YOLO
 
diff --git a/src/modules/imaging/camera.py b/src/modules/imaging/camera.py
index 3880002..555c680 100644
--- a/src/modules/imaging/camera.py
+++ b/src/modules/imaging/camera.py
@@ -42,6 +42,7 @@ def caputure_as_ndarry(self) -> np.ndarray:
         """
         return np.array(self.capture())
 
+
 @dataclass
 class DepthCapture:
     rgb: np.ndarray
@@ -130,7 +131,8 @@ def capture_with_depth(self) -> DepthCapture:
         NOTE: .start() must have been called first. If it has not, this will raise Exception."""
         if not self.device or self.device.isClosed():
             raise Exception("No oakD connection, perhaps you forgot to call the .start() function")
-
+        if self.queue is None:
+            raise Exception("Queue does not exist")
         msg = self.queue.get()
         rgbFrame = msg["rgb"]
         cv_frame = rgbFrame.getCvFrame()
@@ -145,11 +147,10 @@ def capture_with_depth(self) -> DepthCapture:
         return capture
 
     def capture(self) -> Image.Image:
-        capture = self.capture_with_depth
+        capture = self.capture_with_depth()
         img = Image.fromarray(capture.rgb, "RGB")
         return img
 
-
     def start(self):
         """Start the depth-perception process on the OAK-D"""
         print("Starting OAK-D Connection")
@@ -161,6 +162,7 @@ def stop(self):
         self.device.close()
         self.queue = None
 
+
 class DebugCamera(CameraProvider):
     """
     Debug camera source which always returns the same image loaded from
@@ -210,7 +212,7 @@ def capture(self) -> Image.Image:
         self.index = (self.index + 1) % len(self.imgs)
 
         return Image.open(filename).resize(self.size)
-    
+
 
 class GazeboCamera(CameraProvider):
     """
@@ -219,7 +221,7 @@ class GazeboCamera(CameraProvider):
 
     def __init__(self):
         self.port = 5600
-        
+
         gst_pipeline = (
             "udpsrc address=127.0.0.1 port=5600 ! "
             "application/x-rtp, encoding-name=H264 ! "
@@ -228,7 +230,7 @@ def __init__(self):
             "videoconvert ! "
             "appsink"
         )
-        self.size = (640, 480) 
+        self.size = (640, 480)
        self.cap = cv2.VideoCapture(gst_pipeline, cv2.CAP_GSTREAMER)
 
         if not self.cap.isOpened():
@@ -294,7 +296,7 @@ class RPiCamera(CameraProvider):
     source.
     """
 
-    def __init__(self, cam_num: int):
+    def __init__(self, cam_num: int = 0):
         from picamera2 import Picamera2
         self.camera = Picamera2(cam_num)
         self.size = (640, 480)
diff --git a/src/modules/imaging/debug_analysis.py b/src/modules/imaging/debug_analysis.py
deleted file mode 100644
index c67a2e1..0000000
--- a/src/modules/imaging/debug_analysis.py
+++ /dev/null
@@ -1,133 +0,0 @@
-from typing import Callable, Optional, List, Callable, Any
-
-import threading
-# from multiprocessing import Process
-from .detector import BaseDetector, BoundingBox
-from .camera import CameraProvider
-from .debug import ImageAnalysisDebugger
-from ..georeference.inference_georeference import get_object_location
-from .location import LocationProvider
-from PIL import Image, ImageDraw
-
-import os
-
-from .analysis import CameraAttributes, Inference
-
-
-class DebugImageAnalysisDelegate:
-    """
-    Implements an imaging inference loops and provides several methods which
-    can be used to query the latest image analysis results.
-
-    Responsible for capturing pictures regularly, detecting any landing pads in
-    those pictures and then providing the most recent estimate of the landing
-    pad location from the camera's perspective.
-
-    Pass an `ImageAnalysisDebugger` when constructing to see a window with live
-    results.
-
-    TODO: geolocate the landing pad using the drone's location.
-    """
-
-    def __init__(self,
-                 detector: BaseDetector,
-                 camera: CameraProvider,
-                 location_provider: LocationProvider,
-                 debugger: Optional[ImageAnalysisDebugger] = None,
-                 ):
-        import os
-        self.detector = detector
-        self.camera = camera
-        self.debugger = debugger
-        self.location_provider = location_provider
-        self.subscribers: List[Callable[[Image.Image, float, float], Any]] = []
-        self.camera_attributes = CameraAttributes()
-
-        # log pictures taken
-        os.makedirs("tmp/log", exist_ok=True)
-        dirs = os.listdir("tmp/log")
-        current_path = f"tmp/log/{len(dirs)}"
-        os.makedirs(current_path)
-
-        # path to store images taken during flight
-        self.img_path = f"{current_path}/images"
-        os.makedirs(self.img_path)
-
-        # annotated bounding boxes
-        self.bb_img_path = f"{current_path}/bb"
-        os.makedirs(self.bb_img_path)
-
-        # image number
-        self.i = 0
-
-    def get_inference(self, bounding_box: BoundingBox) -> Inference:
-        inference = Inference(bounding_box, self.location_provider.altitude())
-        return inference
-
-    def start(self):
-        """
-        Will start the image analysis process in another thread.
-        """
-        thread = threading.Thread(target=self._analysis_loop)
-        # process = Process(target=self._analysis_loop)
-        thread.start()
-        # process.start()
-        # Use `threading` to start `self._analysis_loop` in another thread.
-
-    def _analyze_image(self):
-        """
-        Actually performs the image analysis once. Only useful for testing,
-        should otherwise we run by `start()` which then starts
-        `_analysis_loop()` in another thread.
-        """
-        im = self.camera.capture()
-        im.save(os.path.join(self.img_path, f"{self.i}.png"))
-
-        bounding_box = self.detector.predict(im)
-
-        if bounding_box:
-            draw = ImageDraw.Draw(im)
-            bb = (bounding_box.position.x, bounding_box.position.y,
-                  bounding_box.size.x, bounding_box.size.y)
-            draw.rectangle(bb)
-
-            im.save(os.path.join(self.bb_img_path, f"{self.i}.png"))
-
-        self.i += 1
-
-        if self.debugger is not None:
-            self.debugger.set_image(im)
-            if bounding_box is not None:
-                self.debugger.set_bounding_box(bounding_box)
-
-        for subscriber in self.subscribers:
-            if bounding_box:
-                inference = self.get_inference(bounding_box)
-                if inference:
-                    x, y = get_object_location(self.camera_attributes,
-                                               inference)
-                    subscriber(im, (x, y))
-            else:
-                subscriber(im, None)
-
-    def _analysis_loop(self):
-        """
-        Indefinitely run image analysis. This should be run in another thread;
-        use `start()` to do so.
-        """
-        while True:
-            self._analyze_image()
-
-    def subscribe(self, callback: Callable):
-        """
-        Subscribe to image analysis updates. For example:
-
-        def myhandler(image: Image.Image, bounding_box: BoundingBox):
-            if bounding_box is None:
-                print("No bounding box detected")
-            else:
-                print("Bounding box detected")
-
-        imaging_process.subscribe(myhandler)
-        """
-        self.subscribers.append(callback)
diff --git a/src/modules/imaging/detector.py b/src/modules/imaging/detector.py
index 0a3b572..1f51e62 100644
--- a/src/modules/imaging/detector.py
+++ b/src/modules/imaging/detector.py
@@ -1,8 +1,10 @@
 from functools import lru_cache
 from typing import Optional
-
+from dataclasses import dataclass
+from functools import cached_property
 from PIL import Image
 import numpy as np
+import math
 import cv2
 
 
@@ -54,36 +56,35 @@
 
 
 class BoundingBox:
+    def __init__(self, position: Vec2, size: Vec2):
+        self.position = position
+        self.size = size
 
-def __init__(self, position: Vec2, size: Vec2):
-    self.position = position
-    self.size = size
-
-@lru_cache(maxsize=2)
-def intersection(self, other: 'BoundingBox') -> float:
-    top_left = Vec2.max(self.position, other.position)
-    bottom_right = Vec2.min(self.position + self.size,
-                            other.position + other.size)
+    @lru_cache(maxsize=2)
+    def intersection(self, other: 'BoundingBox') -> float:
+        top_left = Vec2.max(self.position, other.position)
+        bottom_right = Vec2.min(self.position + self.size,
+                                other.position + other.size)
 
-    size = bottom_right - top_left
+        size = bottom_right - top_left
 
-    intersection = size.x * size.y
-    return max(intersection, 0)
+        intersection = size.x * size.y
+        return max(intersection, 0)
 
-def union(self, other: 'BoundingBox') -> float:
-    intersection = self.intersection(other)
-    if intersection == 0:
-        return 0
+    def union(self, other: 'BoundingBox') -> float:
+        intersection = self.intersection(other)
+        if intersection == 0:
+            return 0
 
-    union = self.size.x * self.size.y + other.size.x * other.size.y - intersection
-    return union
+        union = self.size.x * self.size.y + other.size.x * other.size.y - intersection
+        return union
 
-def intersection_over_union(self, pred: 'BoundingBox') -> Optional[float]:
-    intersection = self.intersection(pred)
-    if intersection == 0:
-        return 0
-    iou = intersection / self.union(pred)
-    return iou
+    def intersection_over_union(self, pred: 'BoundingBox') -> Optional[float]:
+        intersection = self.intersection(pred)
+        if intersection == 0:
+            return 0
+        iou = intersection / self.union(pred)
+        return iou
 
 
 class BaseDetector:
@@ -97,7 +98,7 @@ def predict(self, image: Image.Image) -> Optional[BoundingBox]:
 
         img = np.array(image)
         gray_img = cv2.cvtColor(img, cv2.COLOR_RGBA2GRAY)
-        max_val = np.max(gray_img)  # returns maximum value of brightness
+        max_val = int(np.max(gray_img))  # returns maximum value of brightness
         if max_val < 200:
             return None  # lower threshold for intensity
         _, thresh = cv2.threshold(gray_img, max_val - 10, 255, cv2.THRESH_BINARY)
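
Note: with the BoundingBox helpers restored from module level to proper methods, IoU can be computed directly on instances. A small usage sketch with made-up coordinates (Vec2(x, y) construction is assumed from how Vec2 is used elsewhere in this file):

    from src.modules.imaging.detector import BoundingBox, Vec2

    truth = BoundingBox(Vec2(0, 0), Vec2(10, 10))  # position, then size
    pred = BoundingBox(Vec2(5, 5), Vec2(10, 10))   # overlaps truth in a 5x5 patch

    print(truth.intersection(pred))              # 25
    print(truth.union(pred))                     # 100 + 100 - 25 = 175
    print(truth.intersection_over_union(pred))   # 25 / 175, about 0.143
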
diff --git a/src/sim_scripts/gazebo_camera_test.py b/src/sim_scripts/gazebo_camera.py
similarity index 100%
rename from src/sim_scripts/gazebo_camera_test.py
rename to src/sim_scripts/gazebo_camera.py
diff --git a/test/test_analysis.py b/test/test_analysis.py
index 080f1b8..7eceb71 100644
--- a/test/test_analysis.py
+++ b/test/test_analysis.py
@@ -12,18 +12,17 @@
 from src.modules.imaging.camera import DebugCamera
 from src.modules.imaging.location import DebugLocationProvider
 from src.modules.imaging.debug import ImageAnalysisDebugger
-from dep.labeller.benchmarks.detector import LandingPadDetector, BoundingBox
-from dep.labeller.loader.label import Vec2
+from src.modules.imaging.detector import BaseDetector, BoundingBox, Vec2
 
 
-class DebugLandingPadDetector(LandingPadDetector):
+class DebugLandingPadDetector(BaseDetector):
 
     def __init__(self,
                  vector: Optional[Vec2] = None,
                  bb: Optional[BoundingBox] = None):
         self.bounding_box = bb
 
-    def predict(self, _image: Image.Image) -> Optional[BoundingBox]:
+    def predict(self, image: Image.Image) -> Optional[BoundingBox]:
         return self.bounding_box
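
Note: with the dep/labeller imports swapped for the in-repo detector types, test doubles can be built entirely from src.modules.imaging.detector. A hypothetical test in the same style, reusing the DebugLandingPadDetector stub defined above:

    from PIL import Image

    from src.modules.imaging.detector import BoundingBox, Vec2

    def test_debug_detector_returns_fixed_box():
        bb = BoundingBox(Vec2(1, 2), Vec2(3, 4))
        detector = DebugLandingPadDetector(bb=bb)
        img = Image.new("RGB", (64, 64))
        assert detector.predict(img) is bb
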