import pyrealsense2 as rs
import numpy as np
import cv2
import os
import copy
import math
import tkinter as Tkinter
import argparse


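# Playback viewer for Intel RealSense .bag recordings: opens the recording that
# belongs to a given jpg snapshot, seeks to the frames listed in shp/matcher.txt
# and lets the user measure the 3D distance between two clicked points.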
class Arc_Real:
    def __init__(self, jpg_path):
        # Basic settings: display size and scale factors between stream and screen
        self.width, self.height = self.screen_size()
        self.x_ratio, self.y_ratio = self.screen_ratio()

        # Data from the txt log file; the jpg file name encodes "<weg_id>-<color frame>"
        BagFilePath = os.path.abspath(jpg_path)
        file_dir = os.path.dirname(BagFilePath)
        Pro_Dir = os.path.dirname(file_dir)
        self.weg_id, target_color = os.path.splitext(os.path.basename(BagFilePath))[0].split('-')

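        # matcher.txt is expected to be a comma separated table: the first line
        # holds the column names (including 'Color' and 'Depth' frame numbers),
        # the first column the recording id (weg_id).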
        with open('{}/shp/matcher.txt'.format(Pro_Dir), 'r') as txtfile:
            self.title = [elt.strip() for elt in txtfile.readline().split(',')]
            self.frame_list = [[elt.strip() for elt in line.split(',')]
                               for line in txtfile if line.split(',')[0] == self.weg_id]

        self.frame_dict, self.i = self.get_attribute(color=target_color, weg_id=self.weg_id)
        file_name = os.path.join(Pro_Dir, 'bag', '{}.bag'.format(self.weg_id))
        # Start playback of the recorded .bag file through the RealSense pipeline
        self.pipeline = rs.pipeline()
        config = rs.config()
        config.enable_device_from_file(file_name)
        config.enable_all_streams()
        profile = self.pipeline.start(config)
        device = profile.get_device()
        playback = device.as_playback()
        playback.set_real_time(False)

        self.frame_getter('Color')  # seek to the color frame listed in self.frame_dict
        mode = 'Video Mode'
        direction = 1
        while True:
            img = self.frame_to_image()
            self.img_work(mode=mode, img=img)
            cv2.namedWindow("Color Stream", cv2.WINDOW_FULLSCREEN)
            cv2.imshow("Color Stream", img)

            if mode == 'Measure Mode':
                self.img_origin = self.img_work(mode='Measure Mode', img=img)
                self.img_copy = copy.copy(self.img_origin)
                cv2.setMouseCallback("Color Stream", self.draw)
                cv2.imshow("Color Stream", self.img_copy)

            key = cv2.waitKeyEx(0)

            # Esc or q quits the program, as does closing the window
            if key == 27 or key == 113 or cv2.getWindowProperty('Color Stream', cv2.WND_PROP_VISIBLE) < 1:
                break

            elif key == 32:  # space toggles between Video Mode and Measure Mode
                if mode == 'Measure Mode':
                    mode = 'Video Mode'
                else:
                    self.img_work(mode='Searching', img=img)
                    cv2.imshow("Color Stream", img)
                    cv2.waitKey(1)
                    # Only switch to Measure Mode when a matcher row exists for
                    # the frame that is currently shown
                    item = self.get_attribute(color=self.color_frame_num, weg_id=self.weg_id)
                    if item is not None:
                        self.frame_dict, self.i = item
                        print(item)
                        mode = 'Measure Mode'

            elif key == 2555904:  # right arrow (Windows waitKeyEx code)
                self.i += 1
                direction = 1
            elif key == 2424832:  # left arrow (Windows waitKeyEx code)
                self.i -= 1
                direction = -1

            if mode == 'Measure Mode':
                self.img_work(mode='Searching', img=img)
                cv2.imshow("Color Stream", img)
                cv2.waitKey(1)
                # Step through the matcher rows in the chosen direction until a
                # matching color and depth frame pair is found
                while True:
                    Color, Depth = self.frame_getter('Color'), self.frame_getter('Depth')
                    if Color and Depth:
                        break
                    else:
                        self.i += direction

            if mode != 'Measure Mode':
                frames = self.pipeline.wait_for_frames()
                self.color_frame = frames.get_color_frame()
                self.color_frame_num = self.color_frame.get_frame_number()

        print('finish')
        cv2.destroyAllWindows()
        self.pipeline.stop()
        os._exit(0)

    def screen_size(self):
        # Use 80 % of the physical screen resolution as the display size
        root = Tkinter.Tk()
        width = root.winfo_screenwidth()
        height = root.winfo_screenheight()
        root.destroy()
        return int(width * 0.8), int(height * 0.8)

    def screen_ratio(self):
        # Scale factors between the resized display image and the native
        # 1920x1080 color stream
        img_size = (1920.0, 1080.0)
        width_ratio, height_ratio = self.width / img_size[0], self.height / img_size[1]
        return width_ratio, height_ratio

    def get_attribute(self, color, weg_id):
        # Return (row as dict, row index) for the matcher row whose color frame
        # number is within 5 frames of the given one, or None if there is no match
        for obj in self.frame_list:
            if obj[0] == weg_id and abs(int(obj[2]) - int(color)) < 5:
                i = self.frame_list.index(obj)
                return dict(zip(self.title, obj)), i
        return None

    def index_to_obj(self):
        # Wrap the row index around the end of the list, then load that row
        if self.i >= len(self.frame_list):
            self.i = self.i % len(self.frame_list)
        content = self.frame_list[self.i]
        self.frame_dict = dict(zip(self.title, content))

    def draw(self, event, x, y, flags, params):
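        # Mouse callback for "Color Stream": a left press stores the start point,
        # dragging with the left button previews the measurement, releasing draws
        # it onto the kept copy, and a right click resets to the clean image.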
        img = copy.copy(self.img_copy)
        if event == cv2.EVENT_LBUTTONDOWN:
            self.ix = x
            self.iy = y
            cv2.imshow("Color Stream", self.img_copy)
        elif event == cv2.EVENT_LBUTTONUP:
            img = self.img_copy
            self.img_work(img=img, mode='calc', x=x, y=y)
            cv2.imshow("Color Stream", img)
        elif event == cv2.EVENT_RBUTTONDOWN:
            self.img_copy = copy.copy(self.img_origin)
            cv2.imshow("Color Stream", self.img_copy)
        elif flags == cv2.EVENT_FLAG_LBUTTON:
            self.img_work(img=img, mode='calc', x=x, y=y)
            cv2.imshow("Color Stream", img)

    def img_work(self, mode, img, x=0, y=0):
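        # Draws the overlay onto img: the current frame number, either the mode
        # label or (in 'calc' mode) the measured segment with its length; in any
        # 'Measure' mode pixels without depth data are blacked out first.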
        if 'Measure' in mode:  # mask pixels without valid depth (NaN) in black
            depth_image = np.asanyarray(self.depth_frame.get_data())
            grey_color = 0
            # depth image is 1 channel, color is 3 channels
            depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
            depth_image_3d = cv2.resize(depth_image_3d, (self.width, self.height))
            img = np.where((depth_image_3d <= 0), grey_color, img)

        font = cv2.FONT_ITALIC
        fontScale = 1
        fontColor = (0, 0, 0)
        thickness = 2

        # Add the frame number box
        mid_line_frame_num = int(self.width / 4)
        rec1, rec2 = (mid_line_frame_num - 50, 20), (mid_line_frame_num + 50, 60)
        text = str(self.color_frame_num)
        bottomLeftCornerOfText = (mid_line_frame_num - 45, 50)
        cv2.rectangle(img, rec1, rec2, (255, 255, 255), -1)
        cv2.putText(img, text, bottomLeftCornerOfText, font, fontScale, fontColor, thickness)

        if mode == 'calc':
            # Draw the measured segment; the label shows the distance in meters
            pt1, pt2 = (self.ix, self.iy), (x, y)
            ans = self.calculate_distance(x, y)
            text = '{0:.3}'.format(ans)
            bottomLeftCornerOfText = (self.ix + 10, self.iy - 10)
            rec1, rec2 = (self.ix + 10, self.iy - 5), (self.ix + 80, self.iy - 35)

            cv2.line(img, pt1=pt1, pt2=pt2, color=(0, 0, 230), thickness=3)

        else:
            # Show the current mode name near the middle of the top edge
            mid_line_screen = int(self.width / 2)
            rec1, rec2 = (mid_line_screen - 100, 20), (mid_line_screen + 130, 60)
            text = mode
            bottomLeftCornerOfText = (mid_line_screen - 95, 50)

        cv2.rectangle(img, rec1, rec2, (255, 255, 255), -1)
        cv2.putText(img, text, bottomLeftCornerOfText, font, fontScale, fontColor, thickness)
        return img

    def calculate_distance(self, x, y):
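        # Both pixels are deprojected into 3D camera coordinates with
        # rs2_deproject_pixel_to_point, using the color intrinsics and the depth
        # value at each pixel; the result is their Euclidean distance in meters,
        # or 'NaN' when one of the pixels carries no depth.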
        color_intrin = self.color_intrin
        x_ratio, y_ratio = self.x_ratio, self.y_ratio
        # Map the clicked display coordinates back to native stream coordinates
        ix, iy = int(self.ix / x_ratio), int(self.iy / y_ratio)
        x, y = int(x / x_ratio), int(y / y_ratio)
        udist = self.depth_frame.get_distance(ix, iy)
        vdist = self.depth_frame.get_distance(x, y)
        if udist == 0.00 or vdist == 0.00:
            dist = 'NaN'
        else:
            point1 = rs.rs2_deproject_pixel_to_point(color_intrin, [ix, iy], udist)
            point2 = rs.rs2_deproject_pixel_to_point(color_intrin, [x, y], vdist)
            dist = math.sqrt(math.pow(point1[0] - point2[0], 2)
                             + math.pow(point1[1] - point2[1], 2)
                             + math.pow(point1[2] - point2[2], 2))
        return dist

    def frame_getter(self, mode):
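        # Plays the recording forward, aligning each frame set to the color
        # stream, until the requested stream's frame number is within 5 frames
        # of the target from the current matcher row; after more than 10 frames
        # near the target without a hit it returns None so the caller can try
        # the next row.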
        align_to = rs.stream.color
        align = rs.align(align_to)
        count = 0
        while True:
            self.index_to_obj()
            num = self.frame_dict[mode]
            frames = self.pipeline.wait_for_frames()
            aligned_frames = align.process(frames)

            if mode == 'Color':
                frame = aligned_frames.get_color_frame()
                self.color_frame_num = frame.get_frame_number()
                self.color_intrin = frame.profile.as_video_stream_profile().intrinsics
                self.color_frame = frame
            else:
                frame = aligned_frames.get_depth_frame()
                self.depth_frame_num = frame.get_frame_number()
                self.depth_frame = frame

            frame_num = frame.get_frame_number()

            count = self.count_search(count, frame_num, int(num))
            print('Searching {}: {}, current: {}'.format(mode, num, frame_num))

            if abs(int(frame_num) - int(num)) < 5:
                print('match {} {}'.format(mode, frame_num))
                return frame
            elif count > 10:
                print(num + ' not found, searching next frame')
                return None

    def frame_to_image(self):
        # Convert the current color frame to a BGR image resized to the window
        color_image = np.asanyarray(self.color_frame.get_data())
        color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
        color_final = cv2.resize(color_cvt, (self.width, self.height))
        return color_final

    def count_search(self, count, now, target):
        # Count how many frames have already been seen close to the target
        # (within 100 frames); frame_getter gives up once this passes 10
        if abs(now - target) < 100:
            count += 1
        print('search count: {}'.format(count))
        return count


def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--path", required=False,
                    help="path to the jpg file that identifies the recording")

    args = vars(ap.parse_args())

    jpg_path = args['path']

    # Paths containing 'Kamera' are just opened with the default application;
    # everything else is treated as a recording and loaded into the viewer
    if 'Kamera' in jpg_path:
        os.startfile(jpg_path)
    else:
        Arc_Real(jpg_path)

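# Example invocation (file and folder names below are hypothetical; the jpg is
# expected at <project>/<subdir>/<weg_id>-<frame>.jpg, with <project>/shp/matcher.txt
# and <project>/bag/<weg_id>.bag present):
#   python arc_real.py -p "D:/project/fotos/0815-1234.jpg"
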
if __name__ == '__main__':
    main()