"""
DroneVision is separated from the main Mambo/Bebop class to enable the use of the drone without the FPV camera.
If you want to do vision processing, you will need to create a DroneVision object to capture the
video stream.
Note that this module relies on the opencv module and the ffmpeg program
Author: Amy McGovern, [email protected]
"""
import cv2
import threading
import time
import subprocess
import os
from utils.NonBlockingStreamReader import NonBlockingStreamReader
import inspect
from os.path import join
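
# A minimal usage sketch (not part of the original module).  The Mambo connection calls and
# the address string `mamboAddr` are assumptions; the import path for Mambo also varies
# between pyparrot versions, so adapt this to your own setup:
#
#   # from Mambo import Mambo          (older flat layout)
#   # from pyparrot.Minidrone import Mambo   (packaged layout)
#
#   mambo = Mambo(mamboAddr, use_wifi=True)
#   if mambo.connect(num_retries=3):
#       mambo_vision = DroneVision(mambo, is_bebop=False)
#       if mambo_vision.open_video():
#           mambo_vision.start_video_buffering()
#           time.sleep(30)                                  # fly / process frames here
#           img = mambo_vision.get_latest_valid_picture()
#           mambo_vision.stop_vision_buffering()
#       mambo.disconnect()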
class DroneVision:
    def __init__(self, drone_object, is_bebop, buffer_size=10):
        """
        Setup your vision object and initialize your buffers.  You won't start seeing pictures
        until you call open_video.

        :param drone_object: reference to the drone (mambo or bebop) object
        :param is_bebop: True if it is a bebop and False if it is a mambo
        :param buffer_size: number of frames to buffer in memory. Defaults to 10.
        """
        self.fps = 30
        self.buffer_size = buffer_size
        self.drone_object = drone_object
        self.is_bebop = is_bebop

        # no frames have arrived yet (read by the user callback thread before the
        # buffering thread gets a chance to set it)
        self.new_frame = False

        # initialize a buffer (will contain the last buffer_size vision objects)
        self.buffer = [None] * buffer_size
        self.buffer_index = 0

        # setup the thread for monitoring the vision (but don't start it until we connect in open_video)
        self.vision_thread = threading.Thread(target=self._buffer_vision,
                                              args=(buffer_size, ))
        self.user_vision_thread = None
        self.vision_running = True
    def set_user_callback_function(self, user_callback_function=None, user_callback_args=None):
        """
        Set the (optional) user callback function for handling the new vision frames.  This is
        run in a separate thread that starts when you start the vision buffering.

        :param user_callback_function: function to call whenever a new frame arrives
        :param user_callback_args: arguments to pass to that function
        :return:
        """
        self.user_vision_thread = threading.Thread(target=self._user_callback,
                                                   args=(user_callback_function, user_callback_args))
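
    # A minimal sketch (not part of the original module) of a user callback; `draw_frame`
    # and `my_vision` are hypothetical names.  The callback receives the args tuple it was
    # registered with and pulls the latest frame itself via get_latest_valid_picture:
    #
    #   def draw_frame(args):
    #       my_vision = args[0]
    #       img = my_vision.get_latest_valid_picture()
    #       if img is not None:
    #           cv2.imwrite("latest_frame.png", img)
    #
    #   my_vision.set_user_callback_function(draw_frame, user_callback_args=(my_vision, ))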
    def open_video(self):
        """
        Open the video stream using ffmpeg for capturing and processing.  The address for the stream
        is the same for all Mambos and is documented here:

        http://forum.developer.parrot.com/t/streaming-address-of-mambo-fpv-for-videoprojection/6442/6

        Remember that this will only work if you have connected to the wifi for your mambo!

        Note that the old method tried to open the stream directly into opencv but there are known issues
        with rtsp streams in opencv.  We bypassed opencv to use ffmpeg directly and then opencv is used to
        process the output of ffmpeg.

        :return: True if the vision opened correctly and False otherwise
        """

        # start the stream on the bebop
        if (self.is_bebop):
            self.drone_object.start_video_stream()

        # we have bypassed the old opencv VideoCapture method because it was unreliable for rtsp
        fullPath = inspect.getfile(DroneVision)
        shortPathIndex = fullPath.rfind("/")
        if (shortPathIndex == -1):
            # handle Windows paths
            shortPathIndex = fullPath.rfind("\\")
        print(shortPathIndex)
        shortPath = fullPath[0:shortPathIndex]
        imagePath = join(shortPath, "images")
        utilPath = join(shortPath, "utils")
        print(imagePath)
        print(utilPath)

        # the first step is to open the rtsp stream through ffmpeg
        # this step creates a directory full of images, one per frame
        print("Opening ffmpeg")
        if (self.is_bebop):
            cmdStr = "ffmpeg -protocol_whitelist \"file,rtp,udp\" -i %s/bebop.sdp -r 30 image_" % utilPath + "%03d.png &"
            print(cmdStr)
            self.ffmpeg_process = \
                subprocess.Popen(cmdStr, shell=True, cwd=imagePath, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        else:
            self.ffmpeg_process = \
                subprocess.Popen("ffmpeg -i rtsp://192.168.99.1/media/stream2 -r 30 image_%03d.png &",
                                 shell=True, cwd=imagePath, stderr=subprocess.PIPE, stdout=subprocess.PIPE)

        print("Opening non-blocking readers")

        # open non-blocking readers to look for errors or success
        stderr_reader = NonBlockingStreamReader(self.ffmpeg_process.stderr)
        stdout_reader = NonBlockingStreamReader(self.ffmpeg_process.stdout)

        # look for success or failure in the ffmpeg output (both streams are checked).
        # If it starts correctly, the output contains:
        #   Stream #0:0 -> #0:0 (h264 (native) -> png (native))
        # If it fails, it contains:
        #   Output file #0 does not contain any stream
        success = False
        while (not success):
            line = stderr_reader.readline()
            if (line is not None):
                line_str = line.decode("utf-8")
                print(line_str)
                if line_str.find("Stream #0:0 -> #0:0 (h264 (native) -> png (native))") > -1:
                    success = True
                    break

                if line_str.find("Output file #0 does not contain any stream") > -1:
                    print("Having trouble connecting to the camera 1. A reboot of the mambo may help.")
                    break

            line = stdout_reader.readline()
            if (line is not None):
                line_str = line.decode("utf-8")
                print(line_str)
                if line_str.find("Output file #0 does not contain any stream") > -1:
                    print("Having trouble connecting to the camera 2. A reboot of the mambo may help.")
                    break

                if line_str.find("Stream #0:0 -> #0:0 (h264 (native) -> png (native))") > -1:
                    success = True

        # cleanup our non-blocking readers no matter what happened
        stdout_reader.finish_reader()
        stderr_reader.finish_reader()

        # the second thread starts opencv on these files. That will happen inside the other thread
        # so here we just set the image index to 1 (to start)
        self.image_index = 1

        # return whether or not it worked
        return success
    def start_video_buffering(self):
        """
        If the video capture was successfully opened, then start the thread to buffer the stream
        (and the user callback thread, if one was set).

        :return:
        """
        print("starting vision thread")
        self.vision_thread.start()

        if (self.user_vision_thread is not None):
            self.user_vision_thread.start()
    def _user_callback(self, user_vision_function, user_args):
        """
        Internal method to call the user vision function whenever a new frame arrives

        :param user_vision_function: user callback function to handle vision
        :param user_args: optional arguments to the user callback function
        :return:
        """
        while (self.vision_running):
            if (self.new_frame):
                user_vision_function(user_args)

                # reset the bit for a new frame
                self.new_frame = False

            # put the thread back to sleep for half a frame period
            # sleeping shorter than 1/fps to ensure we stay caught up on frames
            time.sleep(1.0 / (2.0 * self.fps))
    def _buffer_vision(self, buffer_size):
        """
        Internal method to save valid video captures from the camera fps times a second

        :param buffer_size: number of images to buffer (set in init)
        :return:
        """

        # start with no new data
        self.new_frame = False

        while (self.vision_running):
            # grab the latest image
            try:
                # make the name for the next image
                path = "images/image_%03d.png" % self.image_index

                # if ffmpeg hasn't written the next frame yet, sleep briefly and try again
                # (without the sleep this branch busy-waits)
                if (not os.path.exists(path)) and (not os.path.isfile(path)):
                    #print("File %s doesn't exist" % (path))
                    time.sleep(1.0 / (2.0 * self.fps))
                    continue

                img = cv2.imread(path, 1)
                self.image_index = self.image_index + 1

                # got a new image, save it to the buffer directly
                self.buffer_index += 1
                self.buffer_index %= buffer_size
                #print video_frame
                self.buffer[self.buffer_index] = img
                self.new_frame = True

            except cv2.error:
                # assume it is an empty image, so decrement the index and try again
                print("Trying to read an empty png. Let's wait and try again.")
                self.image_index = self.image_index - 1
                continue

            # put the thread back to sleep for faster than fps to ensure we stay on top of the frames
            time.sleep(1.0 / (2.0 * self.fps))
    def get_latest_valid_picture(self):
        """
        Return the latest valid image (from the buffer)

        :return: last valid image received from the Mambo
        """
        return self.buffer[self.buffer_index]
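
    # A minimal polling sketch (not part of the original module; names are illustrative).
    # Instead of registering a callback, the main program can poll the buffer itself:
    #
    #   while my_vision.vision_running:
    #       img = my_vision.get_latest_valid_picture()
    #       if img is not None:
    #           process(img)                        # hypothetical processing function
    #       time.sleep(1.0 / my_vision.fps)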
    def stop_vision_buffering(self):
        """
        Stop the vision threads and kill the ffmpeg process
        """
        self.vision_running = False
        self.ffmpeg_process.kill()

        # send the command to stop the vision stream (bebop only)
        if (self.is_bebop):
            self.drone_object.stop_video_stream()