You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

257 lines
8.4 KiB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
#!/usr/bin/python3
# Traffic-junction monitor: grabs webcam frames, sends them to a remote
# HTTPS detection service, and drives a Telnet-controlled traffic light
# when a detection sits inside a preset "ambulance" region of the frame.
import pickle
import threading
import sys,getpass
import cv2
import os
import numpy as np
import psutil
import subprocess
from telnetlib import Telnet
from utils import label_map_util
from utils import visualization_utils as vis_util
# TensorFlow is only imported on the developer's own machine; everywhere
# else inference is delegated to the remote AI endpoint.
if getpass.getuser() == "tedankara":
    import tensorflow as tf
    from distutils.version import StrictVersion
    if StrictVersion(tf.__version__) < StrictVersion('1.12.0'):
        raise ImportError('Please upgrade your TensorFlow installation to v1.12.*.')
else:
    # import psutil
    pass
import json
import base64
from PIL import Image
from io import BytesIO
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from imutils.video import VideoStream
import ssl
switch = 1  # NOTE(review): reassigned to 0 later and never read — looks vestigial
import socket
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
import time
from object_detection.utils import ops as utils_ops
TELNET = True               # set False to run without the physical traffic light
AI_IP = '127.0.0.1'         # host of the HTTPS detection service (port 5001)
LIGHT_IP = '192.168.2.174'  # Telnet-controlled traffic light controller
# NOTE(review): certificate verification is disabled — acceptable on a
# trusted LAN, unsafe anywhere else.
context = ssl._create_unverified_context()
if TELNET:
    tn = Telnet(LIGHT_IP, 31)
light_green = False  # True while lights_on() is holding the green phase
# What model to download.
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
PATH_TO_LABELS = os.path.join('object_detection/data', 'mscoco_label_map.pbtxt')
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
  46. def load_image_into_numpy_array(image):
  47. (im_width, im_height) = image.size
  48. return np.array(image.getdata()).reshape(
  49. (im_height, im_width, 3)).astype(np.uint8)
  50. data = {"gpu_temp":"10C","gpu_load":"15%","cpu_temp":"47C","cpu_load":"15%","mem_temp":"NaN","mem_load":"17%","fan_speed":"10000RPM"}
  51. def get_temps():
  52. global data
  53. temps = psutil.sensors_temperatures()
  54. result = subprocess.run(['nvidia-smi', '--query-gpu=utilization.memory', '--format=csv'] , stdout=subprocess.PIPE)
  55. data["gpu_load"] = result.stdout.decode("utf-8").split("\n")[1]
  56. result = subprocess.run(['nvidia-smi', '--query-gpu=temperature.gpu', '--format=csv'] , stdout=subprocess.PIPE)
  57. data["gpu_temp"] = result.stdout.decode("utf-8").split("\n")[1]+"°C"
  58. data["cpu_temp"] = str(int(temps["coretemp"][0][1]))+"°C"
  59. data["cpu_load"] = str(psutil.cpu_percent())+"%"
  60. data["mem_load"] = str(dict(psutil.virtual_memory()._asdict())["percent"])+"%"
  61. data["fan_speed"] = str(psutil.sensors_fans()["dell_smm"][0][1])+"RPM"
  62. def run_inference_for_single_image(image):
  63. _, buffer = cv2.imencode('.jpg', image)
  64. img_base64 = base64.b64encode(buffer).decode('ascii')
  65. url = 'https://%s:5001/ai' % AI_IP # Set destination URL here
  66. post_fields = {'img': img_base64,"type":"coco"} # Set POST fields here
  67. request = Request(url, urlencode(post_fields).encode())
  68. data = urlopen(request, context=context).read().decode("ascii")
  69. output_dict = json.loads(data)
  70. return output_dict
  71. kill = True
  72. def listener(port=8385):
  73. serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  74. serversocket.bind((socket.gethostname(), port))
  75. serversocket.listen(5)
  76. while kill:
  77. serversocket.accept()
  78. print('Bye!')
def lights_on():
    """Run one green-light cycle on the Telnet-controlled light, then revert.

    Intended to run in its own thread (see the main loop).  The exact
    write/sleep ordering is the light controller's protocol — do not reorder.
    Command bytes presumably encode "<green-lane>-<red-lane>" — TODO confirm
    against the light firmware.
    """
    global light_green
    light_green = True   # suppress re-triggering while the cycle is running
    tn.write(b"1-0")
    time.sleep(1)
    tn.write(b"2-0")
    time.sleep(10)       # hold the green phase for 10 seconds
    light_green = False  # cleared before reverting, so a new cycle may queue up
    tn.write(b"0-1")
    time.sleep(1)
    tn.write(b"0-2")
# Crop bookkeeping for the frame streamed to the UI socket.
cut = (150, 250, 250, 150)
cut_send = [0, 0, 0, 0]
img_counter = 0        # frames successfully streamed to the UI socket
socket_switch = True   # True -> (re)connect to the UI socket before sending
thread = threading.Thread(target=listener)
thread.start()
cam = VideoStream(src=0).start()  # threaded webcam reader (imutils)
switch = 0
get_temps()  # seed the telemetry dict before entering the loop
# (left, right, top, bottom)
ambulance_coordinates = (150, 400, 250, 400)  # pixel region watched for the ambulance
reps = -1     # total loop iterations (unused beyond counting)
reps_vid = 0
while 1:
    image = cam.read()
    reps_vid += 1
    reps += 1
    try:  # junction ("Kavşak")
        t1 = time.time()
        image_np = image
        # Remote inference; presumably returns normalized boxes plus COCO
        # class ids/scores — verify against the /ai service.
        output_dict = run_inference_for_single_image(image_np)
        height, width, channels = image_np.shape
        # Debug view of the ambulance watch region.
        cv2.imshow('frmmi', image[ambulance_coordinates[2]:ambulance_coordinates[3], ambulance_coordinates[0]:ambulance_coordinates[1]])
        out_dict = {'detection_boxes': [], 'detection_classes': [], 'detection_scores': []}
        # Keep vehicle-class detections above 0.3, dropping near-duplicates:
        # a box is skipped when its centre lies within 0.1 (normalized) of a
        # box already kept.
        for index,i in enumerate(output_dict['detection_classes']):
            cont = False
            if i in [3, 6, 8,44,77]: # Car, bus, truck
                score = output_dict['detection_scores'][index]
                if score > 0.3:
                    if not any((output_dict['detection_boxes'][index] == b) for b in out_dict['detection_boxes']):
                        avg_x = (output_dict['detection_boxes'][index][0] + output_dict['detection_boxes'][index][2])/2
                        avg_y = (output_dict['detection_boxes'][index][1] + output_dict['detection_boxes'][index][3])/2
                        for box in out_dict['detection_boxes']:
                            avg_box_x = (box[0] + box[2])/2
                            avg_box_y = (box[1] + box[3])/2
                            if abs(avg_x-avg_box_x) < 0.1 and abs(avg_y-avg_box_y) < 0.1:
                                cont = True
                                break
                        if cont:
                            continue
                        out_dict['detection_classes'].append(i)
                        out_dict['detection_boxes'].append(output_dict['detection_boxes'][index])
                        out_dict['detection_scores'].append(output_dict['detection_scores'][index])
        out_dict['detection_classes'] = np.array(out_dict['detection_classes'])
        out_dict['detection_boxes'] = np.array(out_dict['detection_boxes'])
        out_dict['detection_scores'] = np.array(out_dict['detection_scores'])
        im_height, im_width, _ = image_np.shape
        # Fire a light cycle when a kept box centre lies within 25 px of the
        # watch-region centre and no cycle is currently running.
        if not light_green and TELNET:
            for index, box in enumerate(out_dict['detection_boxes']):
                # Denormalize to pixel coordinates as (left, right, top, bottom).
                box = tuple(map(int, (box[1] * im_width, box[3] * im_width, box[0] * im_height, box[2] * im_height)))
                # (left, right, top, bottom)
                if abs((box[0] + box[1])/2 - (ambulance_coordinates[0] + ambulance_coordinates[1])/2) < 25 and \
                   abs((box[2] + box[3])/2 - (ambulance_coordinates[2] + ambulance_coordinates[3])/2) < 25:
                    print('ambulance')
                    threading.Thread(target=lights_on).start()
        # Draw the kept detections onto the frame in place.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            out_dict['detection_boxes'],
            out_dict['detection_classes'],
            out_dict['detection_scores'],
            category_index,
            instance_masks=out_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            line_thickness=8,
            min_score_thresh=0.3
        )
        #cv2.imshow('frame', image_np)
        #ex_c = [27, ord("q"), ord("Q")]
        #if cv2.waitKey(1) & 0xFF in ex_c:
        #    break
        t2 = time.time()
        print("time taken for {}".format(t2-t1))
        # Throttle to at most ~10 FPS (skipped on Windows).
        if not sys.platform == "win32" and t2-t1 < 0.1:
            time.sleep(0.1-(t2-t1))
        send_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # (Re)connect to the local UI/stats socket when the last send failed.
        if socket_switch:
            try:
                client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                client_socket.settimeout(0.1)
                client_socket.connect(('127.0.0.1', 8485))
                connection = client_socket.makefile('wb')
                socket_switch = False
            except:
                socket_switch = True
                continue
        try:
            # JPEG-encode the frame and push it plus the telemetry dict as a
            # single newline-terminated JSON record.
            crop_img = send_image.copy(order='C')
            crop_img = Image.fromarray(crop_img, "RGB")
            buffered = BytesIO()
            crop_img.save(buffered, format="JPEG")
            img = base64.b64encode(buffered.getvalue()).decode("ascii")
            lens = [len(send_image), 0, len(send_image[0])]
            # Translate negative crop offsets into absolute coordinates.
            for i in range(0,len(cut), 2):
                if cut[i] < 0:
                    cut_send[i] = lens[i] + cut[i]
                    cut_send[i+1] = abs(cut[i])-abs(cut[i+1])
            client_socket.sendall(json.dumps({"image_full":img,"image_sizes":{"x":90,"y":0,"width":140,"height":140},"load":data}).encode('gbk')+b"\n")
            img_counter += 1
        except:
            # Any send failure forces a reconnect on the next iteration.
            socket_switch = True
        # Refresh hardware telemetry every 10 streamed frames.
        if img_counter % 10 == 0:
            get_temps()
        pass
    except Exception as e:
        # Any unexpected error (inference, display, shape) ends the main loop.
        if hasattr(e, 'message'):
            print(e.message)
        else:
            print(e)
        break
  199. if not socket_switch:
  200. client_socket.sendall(b"Bye\n")
  201. cam.release()
  202. cv2.destroyAllWindows()
  203. cam.stop()
  204. kill = False
  205. thread.join()