Skip to content

Commit 0dab2ae

Browse files
Code
The code extracts KW, Kwh and A readings from electricity meter videos. The text is then displayed on the screen in real-time.
0 parents  commit 0dab2ae

File tree

1 file changed

+148
-0
lines changed

1 file changed

+148
-0
lines changed

Code

Lines changed: 148 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,148 @@
1+
2+
import cv2
3+
import numpy as np
4+
import imutils
5+
import os
6+
7+
# feature matching + text on screen
8+
9+
# Extracts KW, KWH, A and V reading labels from an electricity-meter video by
# SIFT feature matching against template images, and overlays the detected
# label on the cropped meter display in real time.

# Path of the meter video to analyse.
cap = cv2.VideoCapture('C:\\Users\\hp\\electric meter 3.mp4')

# Template (query) images, one per reading label, loaded in grayscale.
img1 = cv2.imread("C:\\Users\\hp\\Template\\temp\\A-p(1).png", cv2.IMREAD_GRAYSCALE)      # 'A' template
img2 = cv2.imread("C:\\Users\\hp\\Template\\temp\\kwh temp 3.png", cv2.IMREAD_GRAYSCALE)  # 'KWH' template
img5 = cv2.imread("C:\\Users\\hp\\Template\\temp\\kw better.png", cv2.IMREAD_GRAYSCALE)   # 'KW' template
img7 = cv2.imread('C:\\Users\\hp\\Template\\V temp.png', 0)                               # 'V' template

# Counts KWH detections so only the first matched frame is written to disk.
count = 0

# Resizing the template images if required:
# img1 = imutils.resize(img1, height=50)
# img2 = imutils.resize(img2, height=50)
# img5 = imutils.resize(img5, height=50)

# Feature detection: precompute SIFT keypoints/descriptors for each template
# once, outside the frame loop.
sift = cv2.xfeatures2d.SIFT_create()
kp_image1, desc_image1 = sift.detectAndCompute(img1, None)
kp_image2, desc_image2 = sift.detectAndCompute(img2, None)
kp_image5, desc_image5 = sift.detectAndCompute(img5, None)
kp_image7, desc_image7 = sift.detectAndCompute(img7, None)

# Feature matching. Depending on the system, a FLANN matcher can be used
# instead (then call flann.knnMatch below):
# index_params = dict(algorithm=0, trees=5)
# search_params = dict()
# flann = cv2.FlannBasedMatcher(index_params, search_params)
bf = cv2.BFMatcher()


def _ratio_test(matches, ratio=0.8):
    """Apply Lowe's ratio test to knnMatch output and return the good matches.

    ratio=0.8 is the ideal value here; adjust according to need. Pairs with
    fewer than two neighbours (knnMatch may return short lists) are skipped
    instead of crashing on unpacking.
    """
    good = []
    for pair in matches:
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < ratio * n.distance:
            good.append(m)
    return good


while True:
    ret, frame = cap.read()
    if not ret:
        # End of video or read failure: frame is None, so stop cleanly
        # instead of crashing on frame.shape.
        break

    (h, w) = frame.shape[:2]
    center = (w / 2, h / 2)

    # Rotate the image by 270 degrees so the meter display is upright.
    M = cv2.getRotationMatrix2D(center, 270, 1.0)
    rotated = cv2.warpAffine(frame, M, (w, h))
    blurred_frame = cv2.GaussianBlur(rotated, (5, 5), 0)
    hsv = cv2.cvtColor(blurred_frame, cv2.COLOR_BGR2HSV)

    # Mask the green LCD display area.
    lower_green = np.array([35, 100, 20])
    upper_green = np.array([85, 255, 255])
    mask = cv2.inRange(hsv, lower_green, upper_green)

    # findContours returns (image, contours, hierarchy) in OpenCV 3.x and
    # (contours, hierarchy) in 4.x; [-2] selects the contours in both.
    contours = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2]
    if not contours:
        # No green region in this frame: max() would raise on an empty
        # sequence, so skip to the next frame.
        continue

    # The biggest green area is taken to be the reading display.
    c = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(c)

    # Draw the reading-area contour (in blue) and crop the ROI.
    im = cv2.rectangle(rotated, (x, y), (x + w, y + h), (255, 0, 0), 2)
    roi = im[y:y + h, x:x + w]
    # resized = imutils.resize(roi, height=450)
    grayframe = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)  # train image

    kp_grayframe, desc_grayframe = sift.detectAndCompute(grayframe, None)
    if desc_grayframe is None:
        # Featureless ROI: knnMatch would fail on a None train descriptor set.
        continue

    # If the FLANN matcher is used: flann.knnMatch() instead of bf.knnMatch().
    good_points1 = _ratio_test(bf.knnMatch(desc_image1, desc_grayframe, k=2))
    good_points2 = _ratio_test(bf.knnMatch(desc_image2, desc_grayframe, k=2))
    good_points5 = _ratio_test(bf.knnMatch(desc_image5, desc_grayframe, k=2))
    good_points7 = _ratio_test(bf.knnMatch(desc_image7, desc_grayframe, k=2))

    font = cv2.FONT_HERSHEY_SIMPLEX

    # Match-count thresholds below are tuned for this video; lower them if
    # the video quality is poor.
    if len(good_points1) > 2:
        cv2.putText(grayframe, 'A', (150, 100), font, 2, (255, 255, 255), 3, cv2.LINE_AA)

    if len(good_points2) > 20:
        cv2.putText(grayframe, 'KWH', (150, 100), font, 2, (255, 255, 255), 3, cv2.LINE_AA)
        # Store the first KWH detection as an image in .png format; count is
        # bumped once per detected frame (not once per match pair).
        count += 1
        if count == 1:
            # drawMatches is only needed for the saved image, so it is only
            # computed here rather than for every template on every frame.
            img4 = cv2.drawMatches(img2, kp_image2, grayframe, kp_grayframe,
                                   good_points2, grayframe)
            cv2.imwrite(os.path.join('C:\\Users\\hp\\Template', 'kWh%d.png' % count), img4)

    if len(good_points5) > 12:
        cv2.putText(grayframe, 'KW', (150, 100), font, 2, (255, 255, 255), 3, cv2.LINE_AA)

    if len(good_points7) > 4:
        cv2.putText(grayframe, 'V', (150, 100), font, 2, (255, 255, 255), 3, cv2.LINE_AA)

    # cv2.imshow("Frame", rotated)
    cv2.imshow("result", grayframe)

    # ESC key quits.
    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()

0 commit comments

Comments
 (0)