visionNetworkRun.py (forked from QuinnThomas/grip-vision-2017)
import cv2
from picamera import PiCamera
from picamera.array import PiRGBArray
from networktables import NetworkTables
from time import clock  # note: time.clock() was removed in Python 3.8; use time.monotonic() on newer versions
import grip

# Connect to the NetworkTables server on the roboRIO and open the table
# the contour data will be published to.
NetworkTables.initialize(server='roborio-167-frc.local')
table = NetworkTables.getTable('myContoursReport')

# Configure the Pi camera and a reusable capture buffer.
camera = PiCamera()
camera.resolution = (416, 320)
camera.framerate = 24
camera.exposure_compensation = 0
rawCapture = PiRGBArray(camera, size=(416, 320))

# GRIP-generated vision pipeline.
processor = grip.GripPipeline()

for frame in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True):
    # Run the GRIP pipeline on the current frame to find target contours.
    contours = processor.process(frame.array)

    # Collect the bounding rectangle of each contour as parallel x/y/w/h lists.
    datax, datay, dataw, datah = [], [], [], []
    for contour in contours:
        data = cv2.boundingRect(contour)
        datax.append(data[0])
        datay.append(data[1])
        dataw.append(data[2])
        datah.append(data[3])

    # Publish the bounding-box arrays for the robot code to read.
    table.putNumberArray('x', datax)
    table.putNumberArray('y', datay)
    table.putNumberArray('w', dataw)
    table.putNumberArray('h', datah)
    print("[%d] Put one frame with %d contours" % (1000 * clock(), len(contours)))

    # Clear the capture buffer so it can be reused for the next frame.
    rawCapture.truncate(0)
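
A minimal sketch, not part of the original file, of how another NetworkTables client on the robot network might read the arrays this script publishes. The table name ('myContoursReport') and keys ('x', 'y', 'w', 'h') match the ones used above; the server address, polling loop, and the center-x calculation are illustrative assumptions.

from networktables import NetworkTables
import time

# Same roboRIO server address as the vision script (assumption: run on the robot network).
NetworkTables.initialize(server='roborio-167-frc.local')
contours_table = NetworkTables.getTable('myContoursReport')

while True:
    # Read the parallel bounding-box arrays; empty lists if nothing published yet.
    xs = contours_table.getNumberArray('x', [])
    ws = contours_table.getNumberArray('w', [])
    if xs:
        # Illustrative use: horizontal center of the first reported contour, in pixels.
        center_x = xs[0] + ws[0] / 2.0
        print('First contour center x: %.1f px' % center_x)
    time.sleep(0.05)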