Merge pull request #18 from machacahomer-robotica-2019/development
Development
AbelChT authored Mar 21, 2019
2 parents 3fc357b + dff2a72 commit 35f545b
Showing 6 changed files with 451 additions and 17 deletions.
152 changes: 141 additions & 11 deletions Robot.py
@@ -9,6 +9,7 @@
from multiprocessing import Process, Value, Array, Lock
import numpy as np

from RobotFrameCapturer import RobotFrameCapturer
from config_file import *
from utils import delay_until

@@ -71,6 +72,9 @@ def __init__(self, init_position=[0.0, 0.0, 0.0]):
# Set motors ports
self.motor_port_left = self.BP.PORT_C
self.motor_port_right = self.BP.PORT_B
self.motor_port_basket = self.BP.PORT_A

self.basket_state = 'up'

# Encoder timer
self.encoder_timer = 0
@@ -105,17 +109,6 @@ def setSpeed(self, v, w):
self.w.value = w
self.lock_odometry.release()

def test_encoders(self):
"""
Read the wheel encoders and print their values in radians
"""
[grad_izq, grad_der] = [self.BP.get_motor_encoder(self.motor_port_left),
self.BP.get_motor_encoder(self.motor_port_right)]
rad_izq = math.radians(grad_izq)
rad_der = math.radians(grad_der)

print("Left wheel: ", rad_izq, " | Right wheel: ", rad_der)

def readSpeed(self):
"""
Read the robot speed
@@ -269,3 +262,140 @@ def normalizeAngle(self, angle):
elif angle > math.pi:
angle = angle - 2 * math.pi
return angle

# ------------------- -------- -------------------
# ------------------- TRACKING -------------------
# ------------------- -------- -------------------
def obtainTrackObjectSpeed(self, x, size):
"""
Return the linear and angular speed needed to track an object
:param x: x coordinate of the object centre in the image, in pixels (0-320)
:param size: radius of the detected object, in pixels
:return: (v, w) linear and angular speed
"""
if x > 160:
x = 320 - x
w = -1
else:
w = 1

# Split the screen into 7 vertical regions; depending on where the ball is, the robot turns with a higher or lower speed
if x < 60:
w = 0.8 * w
elif x < 100:
w = 0.5 * w
elif x < 140:
w = 0.2 * w
else:
w = 0

# Depending on how far away the ball is, the robot follows it faster or slower
if size < 40:
v = 0.25
elif size < 80:
v = 0.15
else:
v = 0.08

return v, w

def trackObject(self, colorRangeMin=[0, 0, 0], colorRangeMax=[255, 255, 255]):
"""
Track an object whose colour is in the range (colorRangeMin, colorRangeMax)
:param colorRangeMin: minimum HSV colour of the object to track
:param colorRangeMax: maximum HSV colour of the object to track
:return: True when the object has been caught
"""
# Start the process that updates the vision values
frame_capturer = RobotFrameCapturer(colorRangeMin, colorRangeMax)
frame_capturer.start()

# Give the camera time to wake up
time.sleep(1)

finished = False

recognition_w = 0.8

recognition_v = 0

recognition_sample_period = 0.2

last_x = 0

while not finished:
x, y, size = frame_capturer.getPosition()

# 1. Search for the most promising blob, rotating towards the side where the ball was last seen
if last_x < 160:
self.setSpeed(recognition_v, recognition_w)
else:
self.setSpeed(recognition_v, -recognition_w)

while size == 0:
# While no promising blob is found, keep sampling
time.sleep(recognition_sample_period)
x, y, size = frame_capturer.getPosition()

# When promising blob is found, stop robot
self.setSpeed(0, 0)

# A promising blob was found; start following it
followBallRecognised = True

while followBallRecognised:
x, y, size = frame_capturer.getPosition()

# Save the last recognised ball position to guess which side it left the image from
if x > 10:
last_x = x

# Track the ball only while it is still in the image
if size > 0:
next_v, next_w = self.obtainTrackObjectSpeed(x, size)

# If we think the ball is inside the basket, lower the basket and rotate the robot to confirm it
if size > 110:
self.setSpeed(0, 0)
self.catch('down')

self.setSpeed(0, 0.4)
time.sleep(5)

self.setSpeed(0, 0)
_, _, size = frame_capturer.getPosition()

# If the ball is still detected, it is in the basket, so finish; otherwise lift the basket and keep tracking
if size > 70:
followBallRecognised = False
finished = True

else:
self.catch('up')
self.setSpeed(next_v, next_w)
else:
self.setSpeed(next_v, next_w)
else:
followBallRecognised = False

frame_capturer.stop()
return finished

def catch(self, movement):
"""
Lift or lower the basket
:param movement: 'up' to lift the basket, 'down' to lower it
:return:
"""
if movement != self.basket_state:
if movement == 'up':
self.BP.set_motor_dps(self.motor_port_basket, -85)
time.sleep(1)
self.BP.set_motor_dps(self.motor_port_basket, 0)
self.basket_state = 'up'
elif movement == 'down':
self.BP.set_motor_dps(self.motor_port_basket, 85)
time.sleep(1)
self.BP.set_motor_dps(self.motor_port_basket, 0)
self.basket_state = 'down'
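
For reviewers, a minimal usage sketch of the new tracking API (not part of this diff). The HSV range below is a hypothetical red-ball range chosen to exercise the hue wrap-around branch in RobotFrameCapturer; adjust it to the real ball colour before running on the robot.

from Robot import Robot

robot = Robot(init_position=[0.0, 0.0, 0.0])
try:
    # Hypothetical HSV range for a red ball (min hue > max hue triggers the wrap-around branch)
    caught = robot.trackObject(colorRangeMin=[170, 100, 20],
                               colorRangeMax=[10, 255, 255])
    print("Ball caught:", caught)
finally:
    # Make sure the robot stops even if tracking is interrupted
    robot.setSpeed(0, 0)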
168 changes: 168 additions & 0 deletions RobotFrameCapturer.py
@@ -0,0 +1,168 @@
import time

from multiprocessing import Process, Value, Array, Lock

import imutils
import numpy as np

from config_file import is_debug

import cv2

if not is_debug:
import picamera
from picamera.array import PiRGBArray


class RobotFrameCapturer(object):
def __init__(self, minRange, maxRange):
# Store X, Y and size
self.x_object = Value('d', 0)
self.y_object = Value('d', 0)
self.size_object = Value('d', 0)

# Check if finished
self.finished = Value('b', False)

# If several instructions must not be interleaved, we use an explicit Lock
self.lock_frame_capturer = Lock()

# min and max range
self.minRange = minRange
self.maxRange = maxRange

def start(self):
self.p = Process(target=self.loopFrameCapturer)
self.p.start()

def stop(self):
# Signal the capture loop to finish
self.finished.value = True

def getPosition(self):
self.lock_frame_capturer.acquire()
x = self.x_object.value
y = self.y_object.value
size = self.size_object.value
self.lock_frame_capturer.release()
return x, y, size

def obtainBallPositionAndSize(self, imgBGR, minRange, maxRange):
"""
Obtain the ball centre position in pixels and its radius
:param imgBGR: Image in BGR
:param minRange: Min colour range to detect the ball, in HSV
:param maxRange: Max colour range to detect the ball, in HSV
:return: (x, y, radius)
"""
# Parts of the following lines were adapted from https://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
blurred = cv2.GaussianBlur(imgBGR, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

# The red hue wraps around in HSV, so allow two hue ranges when the min hue is greater than the max hue
if minRange[0] > maxRange[0]:
minRange0 = list(minRange)
minRange0[0] = 0

maxRange0 = list(maxRange)
maxRange0[0] = maxRange[0]

minRange1 = list(minRange)
minRange1[0] = minRange[0]

maxRange1 = list(maxRange)
maxRange1[0] = 180

mask0 = cv2.inRange(hsv, np.asarray(minRange0), np.asarray(maxRange0))

mask1 = cv2.inRange(hsv, np.asarray(minRange1), np.asarray(maxRange1))

mask = cv2.bitwise_or(mask1, mask0)

else:
mask = cv2.inRange(hsv, minRange, maxRange)

mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)

# find contours in the mask and initialize the current
# (x, y) center of the ball
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

x = 0
y = 0
radius = 0

# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

# only proceed if the radius meets a minimum size
if radius > 5:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(imgBGR, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
cv2.circle(imgBGR, center, 5, (0, 0, 255), -1)

# Show image for debug only
# output = cv2.bitwise_and(imgBGR, imgBGR, mask=mask)
# cv2.imshow("images", np.hstack([imgBGR, output]))

# Wait 50 ms between processed frames
cv2.waitKey(50)

return x, y, radius

def loopFrameCapturer(self):
if is_debug:
cap = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
_, imgBGR = cap.read()

x, y, size = self.obtainBallPositionAndSize(imgBGR, self.minRange, self.maxRange)

self.lock_frame_capturer.acquire()
self.x_object.value = x
self.y_object.value = y
self.size_object.value = size
self.lock_frame_capturer.release()

if self.finished.value:
break

else:
cam = picamera.PiCamera()

cam.resolution = (320, 240)
cam.framerate = 32
rawCapture = PiRGBArray(cam, size=(320, 240))

# allow the camera to warm up
time.sleep(0.1)

for img in cam.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# Capture frame
imgBGR = img.array

x, y, size = self.obtainBallPositionAndSize(imgBGR, self.minRange, self.maxRange)

self.lock_frame_capturer.acquire()
self.x_object.value = x
self.y_object.value = y
self.size_object.value = size
self.lock_frame_capturer.release()

rawCapture.truncate(0)
cv2.waitKey(1)

if self.finished.value:
break
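
A minimal sketch of driving the capturer on its own (assuming is_debug is True, so a local webcam is used through cv2.VideoCapture); this can be handy for tuning the HSV range before deploying to the robot. The HSV values are hypothetical.

from RobotFrameCapturer import RobotFrameCapturer
import time

# Hypothetical HSV range for a red ball; adjust to the actual ball colour
capturer = RobotFrameCapturer([170, 100, 20], [10, 255, 255])
capturer.start()
time.sleep(1)  # give the camera time to wake up

for _ in range(50):
    x, y, size = capturer.getPosition()
    print("x=%.0f  y=%.0f  radius=%.0f" % (x, y, size))
    time.sleep(0.2)

capturer.stop()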
7 changes: 1 addition & 6 deletions config_file.py
@@ -1,6 +1 @@
import os

# If True, we are in debug mode (local execution).
# To set this automatically, declare the RASPBERRY_DEPLOY environment variable
# on the robot, or set is_debug to False there and True locally.
is_debug = 'RASPBERRY_DEPLOY' not in os.environ
is_debug = False
