You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

300 lines
11 KiB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
  1. from modules import utils
  2. from flask import Flask, request, Response
  3. from flask_restful import Resource, Api
  4. from PIL import Image
  5. import cv2
  6. import base64
  7. import json
  8. import sys
  9. import os
  10. import io
  11. import itertools
  12. import pickle
  13. import copy
  14. from urllib.parse import urlencode
  15. from urllib.request import Request, urlopen
  16. import ssl
  17. from object_detection.utils import label_map_util
  18. import face_recognition
# COCO class ids considered vehicles (presumably car/bus/truck — verify
# against mscoco_label_map.pbtxt).
VEHICLE_CLASSES = [3, 6, 8]
# Minimum intersection/smaller-box area ratio for two detections to be
# treated as duplicates (see Box.get_bigger).
MIN_AREA_RATIO = 0.9
import numpy as np
# Detections scored below this confidence are ignored everywhere.
MIN_SCORE_THRESH = 0.6
if sys.platform == "win32":
    # Hard-coded developer-machine path for the traffic analyzer package.
    # NOTE(review): should come from configuration, not a literal path.
    sys.path.insert(0, r'C:\Users\Tednokent01\Downloads\MyCity\traffic_analyzer')

# Map numeric COCO label ids to display names for client-facing output.
PATH_TO_LABELS = os.path.join('object_detection/data', 'mscoco_label_map.pbtxt')
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

app = Flask(__name__)
api = Api(app)

# JSON "databases" are loaded once at startup, mutated in memory and
# rewritten to disk by the endpoints below.
db_path = os.path.join(app.root_path, 'databases', 'crashes.json')
with open(db_path, 'r') as f:
    crashes = json.load(f)

users_path = os.path.join(app.root_path, 'databases', 'users.json')
with open(users_path, 'r') as f:
    users = json.load(f)
  35. def load_image_into_numpy_array(image):
  36. (im_width, im_height) = image.size
  37. return np.array(image.getdata()).reshape(
  38. (im_height, im_width, 3)).astype(np.uint8)
# SSL context with certificate verification DISABLED, used for the request
# to the detection service at https://127.0.0.1:5001 in process_img().
# NOTE(review): acceptable only because the peer is localhost; never use
# this context for remote hosts.
context = ssl._create_unverified_context()
  40. def find_name(image):
  41. try:
  42. known_faces = []
  43. known_face_names = []
  44. for v in users.values():
  45. known_faces.append(np.array(v['face_encoding']))
  46. known_face_names.append(v['id'])
  47. face_encoding = face_recognition.face_encodings(image)[0]
  48. results = face_recognition.compare_faces(known_faces, face_encoding)
  49. name = "Unknown"
  50. face_distances = face_recognition.face_distance(known_faces, face_encoding)
  51. best_match_index = np.argmin(face_distances)
  52. if results[best_match_index]:
  53. name = known_face_names[best_match_index]
  54. return name
  55. except:
  56. return None
  57. def rotate_img(img, angle):
  58. (h, w) = img.shape[:2]
  59. x = h if h > w else w
  60. y = h if h > w else w
  61. square = np.zeros((x, y, 3), np.uint8)
  62. square[int((y-h)/2):int(y-(y-h)/2), int((x-w)/2):int(x-(x-w)/2)] = img
  63. (h, w) = square.shape[:2]
  64. center = (w / 2, h / 2)
  65. M = cv2.getRotationMatrix2D(center, angle, 1.0)
  66. rotated = cv2.warpAffine(square, M, (h, w))
  67. return rotated
  68. def process_img(img_base64):
  69. url = 'https://127.0.0.1:5001/ai' # Set destination URL here
  70. post_fields = {'img': img_base64,"type":"coco"} # Set POST fields here
  71. request = Request(url, urlencode(post_fields).encode())
  72. data = urlopen(request, context=context).read().decode("ascii")
  73. output_dict = json.loads(json.loads(data))
  74. image_np = cv2.cvtColor(load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(img_base64)))),cv2.COLOR_RGB2BGR)
  75. output_dict_processed = {"detection_classes":[], "detection_scores":[], "detection_boxes":[]}
  76. im_height, im_width, _ = image_np.shape
  77. cars_involved = 0
  78. injured_people = 0
  79. prev_cars = []
  80. boxes = []
  81. spam_boxes = []
  82. for index, i in enumerate(output_dict['detection_classes']):
  83. score = output_dict['detection_scores'][index]
  84. if score > MIN_SCORE_THRESH:
  85. if i in VEHICLE_CLASSES:
  86. box = output_dict['detection_boxes'][index]
  87. boxes.append(Box((box[1] * im_width, box[3] * im_width,
  88. box[0] * im_height, box[2] * im_height),
  89. i,index))
  90. box_combinations = itertools.combinations(boxes,r=2)
  91. for combination in box_combinations:
  92. big = combination[0].get_bigger(combination[1])
  93. if big and not big in spam_boxes:
  94. spam_boxes.append(big)
  95. for spam in spam_boxes:
  96. boxes.remove(spam)
  97. for box in boxes:
  98. output_dict_processed["detection_classes"].append(output_dict["detection_classes"][box.index])
  99. output_dict_processed["detection_scores"].append(output_dict["detection_scores"][box.index])
  100. output_dict_processed["detection_boxes"].append(output_dict["detection_boxes"][box.index])
  101. people = {}
  102. for index, i in enumerate(output_dict['detection_classes']):
  103. score = output_dict['detection_scores'][index]
  104. if score > MIN_SCORE_THRESH:
  105. if i in VEHICLE_CLASSES:
  106. box = output_dict['detection_boxes'][index]
  107. (left, right, top, bottom) = (box[1] * im_width, box[3] * im_width,
  108. box[0] * im_height, box[2] * im_height)
  109. avg_x = left+right/2
  110. avg_y = top+bottom/2
  111. same = False
  112. for prev_x, prev_y in prev_cars:
  113. if abs(prev_x-avg_x) < 130 and abs(prev_y-avg_y) < 130:
  114. same = True
  115. break
  116. if not same:
  117. cars_involved += 1
  118. prev_cars.append((avg_x, avg_y))
  119. elif i == 1:
  120. box = output_dict['detection_boxes'][index]
  121. #(left, right, top, bottom) = (box[1] * im_width, box[3] * im_width,
  122. # box[0] * im_height, box[2] * im_height)
  123. (left, top, right, bottom) = box
  124. person = image_np[int(top):int(bottom),int(left):int(right)]
  125. if right-left > bottom-top:
  126. rotated = rotate_img(person, 90)
  127. name = None
  128. try:
  129. face_locs = face_recognition.face_locations(rotated)[0]
  130. name = find_name(rotated)
  131. except Exception:
  132. pass
  133. (height_person,width_person)=person.shape[:2]
  134. excess=(width_person-height_person)/2
  135. if name is None:
  136. rotated = rotate_img(person, 270)
  137. face_locs = face_recognition.face_locations(rotated)[0]
  138. name = find_name(rotated)
  139. face_locs_processed = (top + face_locs[1]-excess,left+face_locs[2],top+face_locs[3]-excess,left+face_locs[0])
  140. else:
  141. face_locs_processed = (top + face_locs[3]-excess,right-face_locs[2],top+face_locs[1]-excess,right-face_locs[0])
  142. cv2.imshow('test.jpg', rotated)
  143. cv2.waitKey(0)
  144. cv2.destroyAllWindows()
  145. people[index] = [0, face_locs_processed, name]
  146. else:
  147. face_locs = face_recognition.face_locations(person)[0]
  148. face_locs_processed = (top+face_locs[0],left+face_locs[1],top+face_locs[2],left+face_locs[3])
  149. name = find_name(person)
  150. people[index] = [1, face_locs_processed, name]
  151. _, buffer = cv2.imencode('.jpg', image_np)
  152. # image_process = image_np[:]
  153. # vis_util.visualize_boxes_and_labels_on_image_array(
  154. # image_process,
  155. # output_dict_processed["detection_boxes"],
  156. # output_dict_processed["detection_classes"],
  157. # output_dict_processed["detection_scores"],
  158. # category_index,
  159. # min_score_thresh=MIN_SCORE_THRESH,
  160. # use_normalized_coordinates=True,
  161. # line_thickness=8)
  162. # cv2.imshow("a",image_process)
  163. # cv2.waitKey(0)
  164. for i in range(len(output_dict_processed["detection_classes"])):
  165. output_dict_processed["detection_classes"][i] = category_index[output_dict_processed["detection_classes"][i]]["name"]
  166. return base64.b64encode(buffer).decode('ascii'), cars_involved, injured_people,output_dict_processed,people
  167. class Crash(Resource):
  168. def post(self):
  169. message = request.form['message']
  170. base64_img = request.form['img']
  171. id = request.form['id']
  172. lat, long = request.form['lat'], request.form['long']
  173. image, car_count, injured,out,people = process_img(base64_img)
  174. (top, right, bottom, left) = people[0][1]
  175. top = int(top)
  176. right = int(right)
  177. left = int(left)
  178. bottom = int(bottom)
  179. img = load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(base64_img))))
  180. cv2.rectangle(img,(left,top),(right,bottom),(0,255,0),3)
  181. cv2.imshow('test.jpg', img)
  182. cv2.waitKey(0)
  183. cv2.destroyAllWindows()
  184. print(people)
  185. priority = car_count + injured
  186. if priority > 10:
  187. priority = 10
  188. crash = {
  189. 'img': image,
  190. 'message': message,
  191. 'priority': priority,
  192. 'stats': {
  193. 'cars': car_count,
  194. 'injured': injured
  195. },
  196. 'location': {
  197. 'latitude': lat,
  198. 'longitude': long
  199. },
  200. "output_dict": out
  201. }
  202. if id in crashes:
  203. crashes[id].append(crash)
  204. else:
  205. crashes[id] = [crash]
  206. with open(db_path, 'w') as f:
  207. json.dump(crashes, f, indent=4)
  208. return crash
  209. class Crashes(Resource):
  210. def post(self):
  211. process_dict = copy.deepcopy(crashes)
  212. return_dict = {}
  213. for id in process_dict:
  214. for i in range(len(process_dict[id])):
  215. del process_dict[id][i]["img"]
  216. for id in process_dict:
  217. for i in range(len(process_dict[id])):
  218. location = process_dict[id][i]['location']
  219. lat, lng = float(request.form['lat']), float(request.form['lng'])
  220. if abs(float(location['latitude']) - lat) < 0.3 and abs(float(location['longitude']) - lng) < 0.3:
  221. if id in return_dict:
  222. return_dict[id].append(process_dict[id][i])
  223. else:
  224. return_dict[id] = [process_dict[id][i]]
  225. return return_dict
  226. class Box:
  227. def __init__(self,coords, type,index):
  228. self.x1 = coords[0]
  229. self.y1 = coords[2]
  230. self.x2 = coords[1]
  231. self.y2 = coords[3]
  232. self.area = (self.x2-self.x1) * (self.y2-self.y1)
  233. self.type = type
  234. self.index = index
  235. def get_bigger(self,box):
  236. if box.type != self.type:
  237. return None
  238. left = max(box.x1, self.x1)
  239. right = min(box.x2, self.x2)
  240. bottom = max(box.y2, self.y2)
  241. top = min(box.y1, self.y1)
  242. if not left < right and bottom < top:
  243. return None
  244. area_temp = abs((right-left)*(top-bottom))
  245. if abs((right-left)*(top-bottom))/((box.area * (box.area < self.area)) + (self.area * (box.area > self.area))) < MIN_AREA_RATIO:
  246. return None
  247. if box.area > self.area:
  248. return box
  249. else:
  250. return self