[英]Py script for fishing minigame cv2.matchTemplate
I am trying to make a script for a fishing minigame; there is a post with a fishing script for Albion Online. The problem is that in this game the pointer is very thin and there are many different textures and colors, so simple grayscale matching works well on one part of the slider (for example on the water texture), sometimes works on others (trees texture), and does not work on the third (e.g. sky).我正在尝试做一个钓鱼小游戏的脚本,网上有一个关于albion钓鱼脚本的帖子,问题是这个游戏的指针很细而且有很多不同的纹理和颜色,简单的灰度匹配工作在滑块的一个部分(例如在水纹理上)很好,有时在其他部分(树木纹理)上工作,在第三个部分(例如天空)上不起作用。 If I change to a lower threshold it often activates without a real match.
如果我更改为较低的阈值,它通常会在不匹配的情况下激活。
# --- Template-matching approach --------------------------------------------
# Grab the slider region of the screen with mss, convert each frame to
# grayscale, and search for the pointer template with cv2.matchTemplate.
# Matches above the threshold are outlined on the preview window.
import time

import cv2
import mss
import numpy as np
import pyautogui as pg
from PIL import Image

# Pointer template, loaded once in grayscale to match the converted frames.
template = cv2.imread("perfect3.png", cv2.IMREAD_GRAYSCALE)
# template.shape is (rows, cols); reversed it gives (width, height).
w, h = template.shape[::-1]

with mss.mss() as sct:
    # Absolute screen coordinates of the fishing slider (the large "left"
    # value suggests a multi-monitor setup — TODO confirm).
    monitor = {"top": 344, "left": 4419, "width": 150, "height": 666}
    while True:
        # mss frames are BGRA; use the alpha-aware conversion code.
        img = np.array(sct.grab(monitor))
        gray_frame = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)

        # Normalised cross-correlation: values near 1.0 are strong matches.
        res = cv2.matchTemplate(gray_frame, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= 0.85)

        # np.where returns (rows, cols); reversed, each pt is an (x, y)
        # top-left corner of a match.
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (0, 255, 0), 3)
            print('click')

        cv2.imshow("OpenCV/Numpy normal", img)
        # Single waitKey per frame: the original called waitKey twice,
        # adding ~26 ms of latency and sometimes swallowing the 'q' key.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break
I also tried it with edge detection (cv2.Canny) without luck.也尝试了边缘检测 cv2.canny 没有运气。
The point is to click the button when the fish is in the smallest green field.重点是当鱼在最小的绿色区域时点击按钮。 The field appears in random parts of a slider.
该字段出现在滑块的随机部分。
Any ideas?有任何想法吗?
================ ================
Tried color matching as Furas said像 Furas 所说的那样尝试配色
# --- Colour-matching approach (as Furas suggested) --------------------------
# Track the green "perfect" field and the blue fish with HSV colour masks,
# then click when the fish's centroid falls inside the green field's band.
from collections import deque

import argparse
import time

import cv2
import imutils
import numpy as np
import pyautogui
from imutils.video import VideoStream
from mss.linux import MSS as mss  # NOTE: shadowed by `import mss` below
from PIL import Image
import mss

# Command-line arguments kept from the original ball-tracking example.
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
                help="max buffer size")
args = vars(ap.parse_args())

# HSV boundaries for the green "perfect" field and the blue fish.
greenLower = (42, 84, 211)
greenUpper = (69, 130, 255)
blueLower = (88, 76, 255)
blueUpper = (151, 76, 255)
# Trail of recent green-field centroids (kept from the tracking example).
pts = deque(maxlen=args["buffer"])

# Grab video from the screen (monitor area).
with mss.mss() as sct:
    monitor = {"top": 325, "left": 4423, "width": 136, "height": 662}
    while True:
        frame = np.array(sct.grab(monitor))

        # Blur to suppress noise, then convert to HSV for colour thresholds.
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

        # Green mask, cleaned with erode/dilate to drop small blobs.
        mask = cv2.inRange(hsv, greenLower, greenUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        # Blue (fish) mask, same cleanup.
        mask2 = cv2.inRange(hsv, blueLower, blueUpper)
        mask2 = cv2.erode(mask2, None, iterations=2)
        mask2 = cv2.dilate(mask2, None, iterations=2)

        # Contours of each mask; centers stay None when nothing is found.
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        center = None
        cnts2 = cv2.findContours(mask2.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
        cnts2 = imutils.grab_contours(cnts2)
        center2 = None

        # BUGFIX: also require cnts2 to be non-empty.  Before a round (or
        # between rounds) only one colour may be on screen, and calling
        # max() on the empty cnts2 raised
        # "ValueError: max() arg is an empty sequence".
        if len(cnts) > 0 and len(cnts2) > 0:
            # Largest green contour -> bounding box + centroid.
            c = max(cnts, key=cv2.contourArea)
            (x, y, w, h) = cv2.boundingRect(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            # Largest blue contour (the fish).
            c2 = max(cnts2, key=cv2.contourArea)
            (x2, y2, w2, h2) = cv2.boundingRect(c2)
            M2 = cv2.moments(c2)
            center2 = (int(M2["m10"] / M2["m00"]), int(M2["m01"] / M2["m00"]))

            # Draw both detections on the preview frame for debugging.
            cv2.rectangle(frame, (int(x), int(y)), (int(x + w), int(y + h)),
                          (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            cv2.rectangle(frame, (int(x2), int(y2)),
                          (int(x2 + w2), int(y2 + h2)), (0, 255, 255), 2)
            cv2.circle(frame, center2, 5, (0, 0, 255), -1)

            # Update the points queue.
            pts.appendleft(center)

            # Click when the fish's y lies within the green field's band.
            if y - 15 < y2 < y + 15:
                pyautogui.click(4908, 984)
                time.sleep(2)
                y2 = 0

        cv2.imshow("frame", frame)
        # Single waitKey per frame (the original's double call could drop
        # the 'q' keypress and slowed the loop).
        if cv2.waitKey(1) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break
But before the round, or between rounds, I get an error:但在回合之前或回合之间我得到错误
Traceback (most recent call last):
File "C:\Users\Game\Desktop\Py\Fish.py", line 74, in <module>
c2 = max(cnts2, key=cv2.contourArea)
ValueError: max() arg is an empty sequence
How can it be solved?如何解决?
Going to combine the two masks into one as Furas advised按照 Furas 的建议,将两个面具合二为一
mask = cv2.bitwise_or(mask1, mask2)
But what can I use to find both maxima, for the green field and the fish?但是我可以用什么来找到绿地和鱼的最大值? Until now it was 2 masks, 2 contour lists and 2 max values.
到目前为止,它是 2 个面具、2 个 cnts 和 2 个最大值。
With mss sct.grab I don't get great FPS (average 25 fps) — are there any better capturing methods?使用 mss sct.grab 我没有那么好的 FPS(平均 25fps)还有其他更好的捕获方法吗?
Thanks a lot!非常感谢!
Ok, after some workarounds I got it working and tested; with colour detection it works on the first "day map".好的,经过一些解决方法后,我让它工作并进行了测试,通过颜色检测,它可以在第一个“日图”上工作。 It doesn't always get a "perfect" hit, but at least a "good" one — I guess it is because of the FPS limit; maybe if I find a way to raise the FPS it would be better.
并不总是获得“完美”的命中率,但至少是“好”,猜测是因为 FPS 队列,也许如果我找到提高 FPS 的方法,那会更好。
# --- Final working version (colour detection, first "day map") --------------
# Per-frame state machine:
#   purple start slider visible -> click to start the round
#   green field + blue fish     -> click when the fish enters the green band
#   gray "Status" field         -> fish caught; log stats and start over
#   purple "miss" pixels        -> fish lost; restart the cast
#   nothing for 40 seconds      -> assume the UI is stuck and restart
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
from mss.linux import MSS as mss  # NOTE: shadowed by `import mss` below
from PIL import Image
import mss
import pyautogui
from win32 import win32gui
from pythonwin import win32ui
from win32.lib import win32con
from win32 import win32api

# HSV colour boundaries for each UI element.
# Green "perfect" field.
greenLower = (42, 79, 211)
greenUpper = (69, 130, 255)
# Blue fish.
blueLower = (88, 76, 255)
blueUpper = (151, 76, 255)
# Purple field (round-start slider).
blue2Lower = (114, 139, 218)
blue2Upper = (123, 165, 255)
# Gray field "Status" (after the fish is caught).
grayLower = (0, 0, 114)
grayUpper = (0, 0, 132)
# Purple "miss" pixels.
purpleLower = (123, 148, 239)
purpleUpper = (125, 165, 243)

# Counters and timers: a = catches, b = misses.  startTime marks the session
# start (for the stats line); startTime2 is reset on every detected event and
# drives the 40-second "stuck" watchdog in the else branch.
y2 = 0
a = 0
b = 0
startTime = time.time()
startTime2 = time.time()

# Grab video from the screen (monitor areas).
with mss.mss() as sct:
    # Region containing the slider and the fish.
    monitor = {"top": 846, "left": 4726, "width": 162, "height": 398}
    # Gray "Status" field shown when the fish is caught.
    monitor2 = {"top": 1017, "left": 4366, "width": 11, "height": 23}
    # Purple pixels shown when the fish is lost.
    monitor3 = {"top": 1013, "left": 4484, "width": 5, "height": 6}

    while True:
        # Grab the current frames of all three regions.
        frame = np.array(sct.grab(monitor))
        frame2 = np.array(sct.grab(monitor2))
        frame3 = np.array(sct.grab(monitor3))

        # Blur to suppress noise, then convert each region to HSV.
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        blurred2 = cv2.GaussianBlur(frame2, (11, 11), 0)
        hsv2 = cv2.cvtColor(blurred2, cv2.COLOR_BGR2HSV)
        blurred3 = cv2.GaussianBlur(frame3, (11, 11), 0)
        hsv3 = cv2.cvtColor(blurred3, cv2.COLOR_BGR2HSV)

        # One mask per UI element; erode/dilate removes small blobs.
        mask = cv2.inRange(hsv, greenLower, greenUpper)      # green field
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        mask2 = cv2.inRange(hsv, blueLower, blueUpper)       # fish
        mask2 = cv2.erode(mask2, None, iterations=2)
        mask2 = cv2.dilate(mask2, None, iterations=2)
        mask3 = cv2.inRange(hsv, blue2Lower, blue2Upper)     # start slider
        mask3 = cv2.erode(mask3, None, iterations=2)
        mask3 = cv2.dilate(mask3, None, iterations=2)
        mask4 = cv2.inRange(hsv2, grayLower, grayUpper)      # caught status
        mask4 = cv2.erode(mask4, None, iterations=2)
        mask4 = cv2.dilate(mask4, None, iterations=2)
        mask5 = cv2.inRange(hsv3, purpleLower, purpleUpper)  # miss marker
        mask5 = cv2.erode(mask5, None, iterations=2)
        mask5 = cv2.dilate(mask5, None, iterations=2)

        # Contours for every mask.
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        cnts2 = cv2.findContours(mask2.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
        cnts2 = imutils.grab_contours(cnts2)
        cnts3 = cv2.findContours(mask3.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
        cnts3 = imutils.grab_contours(cnts3)
        cnts4 = cv2.findContours(mask4.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
        cnts4 = imutils.grab_contours(cnts4)
        cnts5 = cv2.findContours(mask5.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
        cnts5 = imutils.grab_contours(cnts5)

        # --- State machine: only proceed when a contour was found ----------
        if len(cnts3) > 0:
            # Start slider visible: click to begin the round.
            print("start")
            startTime2 = time.time()
            time.sleep(0.1)
            pyautogui.click(4978, 1239)
            time.sleep(1)
            startTime2 = time.time()
        elif len(cnts) > 0 and len(cnts2) > 0:
            # Both the green field and the fish are visible.
            startTime2 = time.time()
            # Largest green contour -> centroid of the "perfect" field.
            c = max(cnts, key=cv2.contourArea)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            (x, y) = center
            # Largest blue contour -> centroid of the fish.
            c2 = max(cnts2, key=cv2.contourArea)
            M2 = cv2.moments(c2)
            center2 = (int(M2["m10"] / M2["m00"]), int(M2["m01"] / M2["m00"]))
            (x2, y2) = center2
            # Click when the fish is within 65 px of the field's centre.
            if y - 65 < y2 < y + 65:
                print("catch")
                pyautogui.click(4978, 1239)
                time.sleep(1)
                y2 = 0
                startTime2 = time.time()
        elif len(cnts4) > 0:
            # Gray "Status" field: the fish was caught.  Log session stats
            # and restart the cast.
            time.sleep(1)
            a += 1
            endTime = time.time()
            timer = endTime - startTime
            hour = timer // 3600
            timer %= 3600
            minutes = timer // 60
            timer %= 60
            seconds = timer
            print(a, "fishes and", b, "misses in %d:%d:%d"
                  % (hour, minutes, seconds))
            print("start over")
            time.sleep(1)
            # Double clicks: presumably to make sure the UI registers them
            # at low FPS — TODO confirm a single click is not enough.
            pyautogui.click(4741, 913)
            pyautogui.click(4741, 913)
            time.sleep(1)
            pyautogui.click(4978, 1239)
            pyautogui.click(4978, 1239)
            time.sleep(1)
            startTime2 = time.time()
        elif len(cnts5) > 0:
            # Purple miss marker: the fish was lost; restart the cast.
            b += 1
            print("Miss")
            time.sleep(2)
            pyautogui.click(4978, 1239)
            startTime2 = time.time()
        else:
            # Watchdog: nothing recognised for 40 seconds -> restart.
            endTime2 = time.time()
            if (endTime2 - startTime2 > 40):
                print("Longer than 40 seconds")
                startTime2 = time.time()
                print("restart")
                pyautogui.click(4741, 913)
                time.sleep(1)
                pyautogui.click(4978, 1239)
                time.sleep(1)

        # Single waitKey per frame (the original called it twice, which
        # could drop the keypress).  NOTE(review): this script never calls
        # cv2.imshow, and waitKey only receives keys while a HighGUI window
        # has focus — the 'q' quit likely never fires; confirm or add a
        # debug imshow window.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break
声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.