You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

284 lines
10 KiB

6 years ago
  1. from modules import utils
  2. from flask import Flask, request, Response
  3. from flask_restful import Resource, Api
  4. from PIL import Image
  5. import cv2
  6. import base64
  7. import json
  8. import sys,getpass
  9. import os
  10. import io
  11. import itertools
  12. import pickle
  13. import copy
  14. from urllib.parse import urlencode
  15. from urllib.request import Request, urlopen
  16. import ssl
  17. from object_detection.utils import label_map_util
  18. import face_recognition
# IP of the machine running the remote object-detection ("/ai") service.
AI_IP = '10.10.26.161'
# Detection class ids treated as vehicles — presumably car/bus/truck in the
# COCO label map; verify against mscoco_label_map.pbtxt.
VEHICLE_CLASSES = [3, 6, 8]
# Minimum overlap ratio for two same-class boxes to count as duplicates (see Box.get_bigger).
MIN_AREA_RATIO = 0.9
import numpy as np
# Minimum confidence score for a detection to be kept.
MIN_SCORE_THRESH = 0.6

# NOTE(review): this sys.path tweak runs after all the imports above, so it
# cannot affect them — looks like a leftover dev-machine hack; confirm before removing.
if getpass.getuser() == "tedankara":
    sys.path.insert(0, r'C:\Users\Tednokent01\Downloads\MyCity\traffic_analyzer')

# COCO label map: maps detection class id -> {"id": ..., "name": ...}.
PATH_TO_LABELS = os.path.join('object_detection/data', 'mscoco_label_map.pbtxt')
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
app = Flask(__name__)
api = Api(app)

# Crash reports, keyed by reporter id -> list of crash dicts.  Loaded once at
# import time and re-serialized to disk on every new report (see Crash.post).
db_path = os.path.join(app.root_path, 'databases', 'crashes.json')
with open(db_path, 'r') as f:
    crashes = json.load(f)

# Registered users; each value carries at least 'face_encoding' and 'id'
# (consumed by find_name below).
users_path = os.path.join(app.root_path, 'databases', 'users.json')
with open(users_path, 'r') as f:
    users = json.load(f)
  36. def load_image_into_numpy_array(image):
  37. (im_width, im_height) = image.size
  38. return np.array(image.getdata()).reshape(
  39. (im_height, im_width, 3)).astype(np.uint8)
# TLS context with certificate verification DISABLED, used for calls to the AI
# service in process_img.  NOTE(review): this accepts any certificate
# (man-in-the-middle risk); acceptable only on a trusted LAN — confirm.
context = ssl._create_unverified_context()
  41. def find_name(image):
  42. try:
  43. known_faces = []
  44. known_face_names = []
  45. for v in users.values():
  46. known_faces.append(np.array(v['face_encoding']))
  47. known_face_names.append(v['id'])
  48. face_encoding = face_recognition.face_encodings(image)[0]
  49. results = face_recognition.compare_faces(known_faces, face_encoding)
  50. name = "Unknown"
  51. face_distances = face_recognition.face_distance(known_faces, face_encoding)
  52. best_match_index = np.argmin(face_distances)
  53. if results[best_match_index]:
  54. name = known_face_names[best_match_index]
  55. return name
  56. except:
  57. return None
  58. def rotate_img(img, angle):
  59. if angle == 90:
  60. return np.rot90(img)
  61. elif angle == 270:
  62. return np.rot90(np.rot90(np.rot90(img)))
def process_img(img_base64):
    """Send a base64 JPEG to the remote AI service and post-process detections.

    Returns a 5-tuple: (annotated_jpeg_base64, cars_involved, injured_people,
    output_dict_processed, people).  NOTE(review): injured_people is never
    incremented anywhere in this function (always 0), and the person/face
    branch below is disabled by an early `continue` — confirm intent.
    """
    url = 'https://{}:5001/ai'.format(AI_IP) # Set destination URL here
    post_fields = {'img': img_base64,"type":"coco"} # Set POST fields here
    # NOTE(review): local `request` shadows flask's `request` inside this function.
    request = Request(url, urlencode(post_fields).encode())
    # Uses the module-level unverified SSL context (no cert validation).
    data = urlopen(request, context=context).read().decode("ascii")
    # Service apparently returns JSON whose payload is itself a JSON string,
    # hence the double loads — TODO confirm against the AI service.
    output_dict = json.loads(json.loads(data))
    # Decode the posted image; PIL yields RGB, OpenCV expects BGR.
    image_np = cv2.cvtColor(load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(img_base64)))),cv2.COLOR_RGB2BGR)
    output_dict_processed = {"detection_classes":[], "detection_scores":[], "detection_boxes":[]}
    im_height, im_width, _ = image_np.shape
    cars_involved = 0
    injured_people = 0
    prev_cars = []
    boxes = []
    spam_boxes = []
    # Pass 1: wrap confident detections as pixel-space Box objects.
    for index, i in enumerate(output_dict['detection_classes']):
        score = output_dict['detection_scores'][index]
        if score > MIN_SCORE_THRESH:
            box = output_dict['detection_boxes'][index]
            boxes.append(Box((box[1] * im_width, box[3] * im_width,
                              box[0] * im_height, box[2] * im_height),
                             i,index))
    # Drop heavily-overlapping same-class duplicates ("spam" boxes).
    box_combinations = itertools.combinations(boxes,r=2)
    for combination in box_combinations:
        big = combination[0].get_bigger(combination[1])
        if big and not big in spam_boxes:
            spam_boxes.append(big)
    for spam in spam_boxes:
        boxes.remove(spam)
    # Keep only the surviving boxes' raw entries, via their original indices.
    for box in boxes:
        output_dict_processed["detection_classes"].append(output_dict["detection_classes"][box.index])
        output_dict_processed["detection_scores"].append(output_dict["detection_scores"][box.index])
        output_dict_processed["detection_boxes"].append(output_dict["detection_boxes"][box.index])
    people = {}
    # Pass 2: count distinct vehicles (and, in the disabled branch, analyze people).
    for index, i in enumerate(output_dict['detection_classes']):
        score = output_dict['detection_scores'][index]
        if score > MIN_SCORE_THRESH:
            if i in VEHICLE_CLASSES:
                box = output_dict['detection_boxes'][index]
                (left, right, top, bottom) = (box[1] * im_width, box[3] * im_width,
                                              box[0] * im_height, box[2] * im_height)
                # NOTE(review): missing parentheses — this is left + (right/2),
                # not the box centre (left+right)/2; same for avg_y.  The 130px
                # de-duplication below therefore runs on skewed coordinates.
                avg_x = left+right/2
                avg_y = top+bottom/2
                same = False
                # Treat detections within 130px of an already-counted car as the same car.
                for prev_x, prev_y in prev_cars:
                    if abs(prev_x-avg_x) < 130 and abs(prev_y-avg_y) < 130:
                        same = True
                        break
                if not same:
                    cars_involved += 1
                    prev_cars.append((avg_x, avg_y))
            elif i == 1:
                # NOTE(review): this `continue` makes everything below in this
                # branch unreachable — face detection/annotation is disabled.
                continue
                box = output_dict['detection_boxes'][index]
                (left, right, top, bottom) = tuple(map(int, (box[1] * im_width, box[3] * im_width,
                                                             box[0] * im_height, box[2] * im_height)))
                person = image_np[int(top):int(bottom),int(left):int(right)]
                if right-left > bottom-top:
                    # Wider than tall: person presumably lying down; try 90deg
                    # then 270deg rotations to find an upright face.
                    rotated = rotate_img(person, 90)
                    name = None
                    try:
                        face_locs = face_recognition.face_locations(rotated)[0]
                        name = find_name(rotated)
                    except Exception:
                        pass
                    (height_person,width_person) = person.shape[:2]
                    if name is None:
                        rotated = rotate_img(person, 270)
                        face_locs = face_recognition.face_locations(rotated)[0]
                        name = find_name(rotated)
                        # Map the face box from the rotated crop back into
                        # full-image coordinates.
                        (top_face, right_face, bottom_face, left_face) = face_locs
                        face_locs_processed = (top + height_person - right_face,left+bottom_face,top + height_person - left_face,left+top_face)
                    else:
                        (top_face, right_face, bottom_face, left_face) = face_locs
                        person = cv2.rectangle(person, (width_person - bottom_face, left_face), (width_person - top_face, right_face), (0, 255, 0), 3)
                        face_locs_processed = (top + left_face,left + width_person - top_face,top + right_face,left + width_person - bottom_face)
                    # 0 = person lying down (possible injury), then face box and name.
                    people[index] = [0, face_locs_processed, name]
                else:
                    face_locs = face_recognition.face_locations(person)[0]
                    (top_face, right_face, bottom_face, left_face) = face_locs
                    face_locs_processed = (top+face_locs[0],left+face_locs[1],top+face_locs[2],left+face_locs[3])
                    name = find_name(person)
                    # 1 = person upright.
                    people[index] = [1, face_locs_processed, name]
    _, buffer = cv2.imencode('.jpg', image_np)
    # Convert kept boxes to pixel coordinates and class ids to display names.
    for i in range(len(output_dict_processed["detection_classes"])):
        box = output_dict_processed["detection_boxes"][i]
        output_dict_processed["detection_boxes"][i] = [box[1] * im_width, box[3] * im_width, box[0] * im_height, box[2] * im_height]
        output_dict_processed["detection_classes"][i] = category_index[output_dict_processed["detection_classes"][i]]["name"]
    return base64.b64encode(buffer).decode('ascii'), cars_involved, injured_people,output_dict_processed,people
class Crash(Resource):
    """POST endpoint: register a crash report (image + location) for a user."""

    def post(self):
        # Required form fields from the reporting client.
        message = request.form['message']
        base64_img = request.form['img']
        id = request.form['id']
        lat, long = request.form['lat'], request.form['long']
        # Run detection; `people` maps detection index -> [orientation, face_box, name].
        image, car_count, injured,out,people = process_img(base64_img)
        # NOTE(review): `people` is produced by a branch that process_img
        # currently disables with `continue`, so people[0] raises KeyError
        # whenever no person entry 0 exists — confirm intent.
        (top, right, bottom, left) = people[0][1]
        top = int(top)
        right = int(right)
        left = int(left)
        bottom = int(bottom)
        img = load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(base64_img))))
        cv2.rectangle(img,(left,top),(right,bottom),(0,255,0),3)
        # NOTE(review): GUI debug code — imshow/waitKey(0) blocks this request
        # handler until a key is pressed on the server; should not ship.
        cv2.imshow('test.jpg', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        print(people)
        # Priority heuristic: vehicles involved + injured people, capped at 10.
        priority = car_count + injured
        if priority > 10:
            priority = 10
        crash = {
            'img': image,
            'message': message,
            'priority': priority,
            'stats': {
                'cars': car_count,
                'injured': injured
            },
            'location': {
                'latitude': lat,
                'longitude': long
            },
            "output_dict": out,
            "people":people
        }
        # Append to the in-memory db and persist the whole file to disk.
        if id in crashes:
            crashes[id].append(crash)
        else:
            crashes[id] = [crash]
        with open(db_path, 'w') as f:
            json.dump(crashes, f, indent=2)
        return crash
  194. class Crashes(Resource):
  195. def post(self):
  196. process_dict = copy.deepcopy(crashes)
  197. return_dict = {}
  198. for id in process_dict:
  199. for i in range(len(process_dict[id])):
  200. del process_dict[id][i]["img"]
  201. for id in process_dict:
  202. for i in range(len(process_dict[id])):
  203. location = process_dict[id][i]['location']
  204. lat, lng = float(request.form['lat']), float(request.form['lng'])
  205. if abs(float(location['latitude']) - lat) < 0.3 and abs(float(location['longitude']) - lng) < 0.3:
  206. if id in return_dict:
  207. return_dict[id].append(process_dict[id][i])
  208. else:
  209. return_dict[id] = [process_dict[id][i]]
  210. return return_dict
  211. class Box:
  212. def __init__(self,coords, type,index):
  213. self.x1 = coords[0]
  214. self.y1 = coords[2]
  215. self.x2 = coords[1]
  216. self.y2 = coords[3]
  217. self.area = (self.x2-self.x1) * (self.y2-self.y1)
  218. self.type = type
  219. self.index = index
  220. def get_bigger(self,box):
  221. if box.type != self.type:
  222. return None
  223. left = max(box.x1, self.x1)
  224. right = min(box.x2, self.x2)
  225. bottom = max(box.y2, self.y2)
  226. top = min(box.y1, self.y1)
  227. if not left < right and bottom < top:
  228. return None
  229. area_temp = abs((right-left)*(top-bottom))
  230. if abs((right-left)*(top-bottom))/((box.area * (box.area < self.area)) + (self.area * (box.area > self.area))) < MIN_AREA_RATIO:
  231. return None
  232. if box.area > self.area:
  233. return box
  234. else:
  235. return self