|
| 1 | +import sys |
| 2 | +import pytesseract |
| 3 | +import cv2 as cv |
| 4 | +import numpy as np |
| 5 | +from datetime import datetime |
| 6 | +import re |
| 7 | +import matplotlib.pyplot as plt |
| 8 | +from concurrent.futures import ThreadPoolExecutor |
| 9 | +import os |
| 10 | + |
def is_display_attached():
    """Return True when an X display is available (the DISPLAY env var is set)."""
    return os.environ.get('DISPLAY') is not None
| 14 | + |
def extract_text_from_region(image, x, y, font_size, length):
    """
    Run OCR on a rectangular patch of *image* and return the recognized text.

    :param image: Source image (a numpy array, e.g. a grayscale frame).
    :param x: The x-coordinate of the top-left corner of the region.
    :param y: The y-coordinate of the top-left corner of the region.
    :param font_size: The font size (pixel height) of the text.
    :param length: The pixel width of the text to extract.
    :return: The text recognized by Tesseract inside the region.
    """
    margin = 5
    # Grow the requested rectangle by a small margin on each side, clamping
    # the top-left corner so the slice never leaves the image.
    top = max(0, y - margin)
    left = max(0, x - margin)
    bottom = y + font_size + margin
    right = x + length + margin

    region_of_interest = image[top:bottom, left:right]

    # Hand the cropped region to Tesseract for recognition.
    return pytesseract.image_to_string(region_of_interest, lang='eng')
| 35 | + |
def process_frame(frame_idx, frame):
    """
    OCR the transmit/receive timestamps burned into one video frame and
    return the latency between them.

    :param frame_idx: Index of the frame (used only for progress logging).
    :param frame: BGR frame as read by cv.VideoCapture.
    :return: Latency in milliseconds, or 0 when the timestamps could not be
             read or are inconsistent (callers filter out the zeros).
    """
    print("Processing Frame: ", frame_idx)

    timestamp_format = "%H:%M:%S:%f"
    timestamp_pattern = r'\b\d{2}:\d{2}:\d{2}:\d{3}\b'

    # Convert frame to grayscale for better OCR performance
    frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    # Tx timestamp is expected on the first text line, Rx on the second.
    line_1 = extract_text_from_region(frame, 10, 10, 40, 600)
    line_2 = extract_text_from_region(frame, 10, 70, 40, 600)

    # Find the timestamps (Type: string) in the extracted text using regex
    tx_match = re.search(timestamp_pattern, line_1)
    rx_match = re.search(timestamp_pattern, line_2)

    if tx_match is None or rx_match is None:
        print("Error: Timestamp not found in the expected format.")
        return 0

    # The regex guarantees the matched strings fit the format, so strptime
    # cannot fail here. (strptime never returns None, so the original second
    # None-check after parsing was dead code and has been removed.)
    tx_time = datetime.strptime(tx_match.group(), timestamp_format)
    rx_time = datetime.strptime(rx_match.group(), timestamp_format)

    if tx_time > rx_time:
        print("Error: Transmit time is greater than receive time.")
        return 0

    time_difference = rx_time - tx_time
    return time_difference.total_seconds() * 1000
| 71 | + |
def main():
    """
    Measure end-to-end video latency by OCR-ing the Tx/Rx timestamps from
    every frame of the input video, then plot and report the average latency.

    Usage: python text-detection.py <input_video_file> [<output_image_name>]
    """
    from datetime import timezone

    if len(sys.argv) < 2:
        print("Usage: python text-detection.py <input_video_file> <output_image_name>")
        sys.exit(1)

    input_video_file = sys.argv[1]
    cap = cv.VideoCapture(input_video_file)
    if not cap.isOpened():
        print("Fatal: Could not open video file.")
        sys.exit(1)

    frame_idx = 0
    time_differences = []

    # OCR each frame concurrently. Tesseract runs as an external process, so
    # threads are enough to overlap the work despite the GIL.
    with ThreadPoolExecutor(max_workers=40) as executor:
        futures = []
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            futures.append(executor.submit(process_frame, frame_idx, frame))
            frame_idx += 1

        # Collect results in submission order so indices match frame order.
        for future in futures:
            time_differences.append(future.result())

    # Zero is process_frame's sentinel for "timestamp unreadable"; drop those.
    non_zero_time_differences = [td for td in time_differences if td != 0]

    if not non_zero_time_differences:
        print("Fatal: No timestamps recognized in the video. No data for calculating latency.")
        sys.exit(1)

    average_latency = np.mean(non_zero_time_differences)

    # Filter out anomaly peaks that differ more than 25% from the average for average calculation
    filtered_time_differences = [
        td for td in non_zero_time_differences if abs(td - average_latency) <= 0.25 * average_latency
    ]

    # If every sample was classified as an outlier (possible with a bimodal
    # distribution), fall back to the unfiltered average instead of letting
    # np.mean([]) produce NaN.
    if filtered_time_differences:
        filtered_average_latency = np.mean(filtered_time_differences)
    else:
        filtered_average_latency = average_latency

    # Plot the non-zero data
    plt.plot(non_zero_time_differences, marker='o')
    plt.title('End-to-End Latency — Media Transport Library')
    plt.xlabel('Frame Index')
    plt.ylabel('Latency, ms')
    plt.grid(True)

    # Adjust the layout to create more space for the text
    plt.subplots_adjust(bottom=0.5)

    # Prepare text for display and stdout
    average_latency_text = f'Average End-to-End Latency: {filtered_average_latency:.2f} ms'
    file_name = os.path.basename(input_video_file)
    # Convert the mtime explicitly to UTC: naive datetime.fromtimestamp()
    # returns local time, which contradicted the "UTC" label below.
    file_mod_time = datetime.fromtimestamp(
        os.path.getmtime(input_video_file), tz=timezone.utc
    ).strftime('%Y-%m-%d %H:%M:%S')
    file_info_text = f'File: {file_name} | Last modified: {file_mod_time} UTC'
    width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv.CAP_PROP_FPS)
    video_properties_text = f'Resolution: {width}x{height} | FPS: {fps:.2f}'

    cap.release()

    # Display text on the plot
    plt.text(0.5, -0.55, average_latency_text,
             horizontalalignment='center', verticalalignment='center',
             transform=plt.gca().transAxes)
    plt.text(0.5, -0.85, file_info_text,
             horizontalalignment='center', verticalalignment='center',
             transform=plt.gca().transAxes)
    plt.text(0.5, -1, video_properties_text,
             horizontalalignment='center', verticalalignment='center',
             transform=plt.gca().transAxes)

    if is_display_attached():
        plt.show()

    if len(sys.argv) == 3:
        filename = sys.argv[2]
        if not filename.endswith('.jpg'):
            filename += '.jpg'
        print("Saving the latency chart to: ", filename)
        plt.savefig(filename, format='jpg', dpi=300)

    # Print text to stdout
    print(file_info_text)
    print(video_properties_text)
    print(average_latency_text)

if __name__ == "__main__":
    main()
0 commit comments