You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

283 lines
9.5 KiB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
  1. from modules import utils
  2. from flask import Flask, request, Response
  3. from flask_restful import Resource, Api
  4. from PIL import Image
  5. import cv2
  6. import base64
  7. import json
  8. import sys
  9. import os
  10. import io
  11. import itertools
  12. import pickle
  13. import copy
  14. from urllib.parse import urlencode
  15. from urllib.request import Request, urlopen
  16. import ssl
  17. from object_detection.utils import label_map_util
  18. import face_recognition
# Address of the machine hosting the AI detection service.
# NOTE(review): process_img below posts to a hard-coded URL instead of AI_IP —
# presumably this constant is (or was) meant to be used there; confirm.
AI_IP = '10.10.26.161'
# COCO class ids counted as vehicles (presumably car/bus/truck — verify
# against mscoco_label_map.pbtxt).
VEHICLE_CLASSES = [3, 6, 8]
# Minimum intersection/smaller-box-area ratio for two same-class detections
# to be treated as duplicates (see Box.get_bigger).
MIN_AREA_RATIO = 0.9
import numpy as np
# Detections below this confidence are ignored everywhere.
MIN_SCORE_THRESH = 0.6
# NOTE(review): this path insert runs AFTER the object_detection import above,
# so on Windows that import must already be resolvable some other way — confirm.
if sys.platform == "win32":
    sys.path.insert(0, r'C:\Users\Tednokent01\Downloads\MyCity\traffic_analyzer')
PATH_TO_LABELS = os.path.join('object_detection/data', 'mscoco_label_map.pbtxt')
# Maps numeric COCO class ids to {'id': ..., 'name': ...} dicts.
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
app = Flask(__name__)
api = Api(app)
# Crash reports, keyed by reporter id -> list of crash records. Loaded once at
# startup and rewritten to disk by Crash.post.
db_path = os.path.join(app.root_path, 'databases', 'crashes.json')
with open(db_path, 'r') as f:
    crashes = json.load(f)
# Known users, keyed by some user key -> record containing at least
# 'face_encoding' and 'id' (see find_name).
users_path = os.path.join(app.root_path, 'databases', 'users.json')
with open(users_path, 'r') as f:
    users = json.load(f)
  36. def load_image_into_numpy_array(image):
  37. (im_width, im_height) = image.size
  38. return np.array(image.getdata()).reshape(
  39. (im_height, im_width, 3)).astype(np.uint8)
# TLS context with certificate verification DISABLED, used for the AI-service
# call in process_img. NOTE(review): insecure — acceptable only if the AI
# endpoint lives on a trusted private network; confirm before shipping.
context = ssl._create_unverified_context()
  41. def find_name(image):
  42. try:
  43. known_faces = []
  44. known_face_names = []
  45. for v in users.values():
  46. known_faces.append(np.array(v['face_encoding']))
  47. known_face_names.append(v['id'])
  48. face_encoding = face_recognition.face_encodings(image)[0]
  49. results = face_recognition.compare_faces(known_faces, face_encoding)
  50. name = "Unknown"
  51. face_distances = face_recognition.face_distance(known_faces, face_encoding)
  52. best_match_index = np.argmin(face_distances)
  53. if results[best_match_index]:
  54. name = known_face_names[best_match_index]
  55. return name
  56. except:
  57. return None
  58. def rotate_img(img, angle):
  59. if angle == 90:
  60. return np.rot90(img)
  61. elif angle == 270:
  62. return np.rot90(np.rot90(np.rot90(img)))
  63. def process_img(img_base64):
  64. url = 'https://192.168.2.238:5001/ai' # Set destination URL here
  65. post_fields = {'img': img_base64,"type":"coco"} # Set POST fields here
  66. request = Request(url, urlencode(post_fields).encode())
  67. data = urlopen(request, context=context).read().decode("ascii")
  68. output_dict = json.loads(json.loads(data))
  69. image_np = cv2.cvtColor(load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(img_base64)))),cv2.COLOR_RGB2BGR)
  70. output_dict_processed = {"detection_classes":[], "detection_scores":[], "detection_boxes":[]}
  71. im_height, im_width, _ = image_np.shape
  72. cars_involved = 0
  73. injured_people = 0
  74. prev_cars = []
  75. boxes = []
  76. spam_boxes = []
  77. for index, i in enumerate(output_dict['detection_classes']):
  78. score = output_dict['detection_scores'][index]
  79. if score > MIN_SCORE_THRESH:
  80. box = output_dict['detection_boxes'][index]
  81. boxes.append(Box((box[1] * im_width, box[3] * im_width,
  82. box[0] * im_height, box[2] * im_height),
  83. i,index))
  84. box_combinations = itertools.combinations(boxes,r=2)
  85. for combination in box_combinations:
  86. big = combination[0].get_bigger(combination[1])
  87. if big and not big in spam_boxes:
  88. spam_boxes.append(big)
  89. for spam in spam_boxes:
  90. boxes.remove(spam)
  91. for box in boxes:
  92. output_dict_processed["detection_classes"].append(output_dict["detection_classes"][box.index])
  93. output_dict_processed["detection_scores"].append(output_dict["detection_scores"][box.index])
  94. output_dict_processed["detection_boxes"].append(output_dict["detection_boxes"][box.index])
  95. people = {}
  96. for index, i in enumerate(output_dict['detection_classes']):
  97. score = output_dict['detection_scores'][index]
  98. if score > MIN_SCORE_THRESH:
  99. if i in VEHICLE_CLASSES:
  100. box = output_dict['detection_boxes'][index]
  101. (left, right, top, bottom) = (box[1] * im_width, box[3] * im_width,
  102. box[0] * im_height, box[2] * im_height)
  103. avg_x = left+right/2
  104. avg_y = top+bottom/2
  105. same = False
  106. for prev_x, prev_y in prev_cars:
  107. if abs(prev_x-avg_x) < 130 and abs(prev_y-avg_y) < 130:
  108. same = True
  109. break
  110. if not same:
  111. cars_involved += 1
  112. prev_cars.append((avg_x, avg_y))
  113. elif i == 1:
  114. box = output_dict['detection_boxes'][index]
  115. (left, right, top, bottom) = tuple(map(int, (box[1] * im_width, box[3] * im_width,
  116. box[0] * im_height, box[2] * im_height)))
  117. person = image_np[int(top):int(bottom),int(left):int(right)]
  118. if right-left > bottom-top:
  119. rotated = rotate_img(person, 90)
  120. name = None
  121. try:
  122. face_locs = face_recognition.face_locations(rotated)[0]
  123. name = find_name(rotated)
  124. except Exception:
  125. pass
  126. (height_person,width_person) = person.shape[:2]
  127. if name is None:
  128. rotated = rotate_img(person, 270)
  129. face_locs = face_recognition.face_locations(rotated)[0]
  130. name = find_name(rotated)
  131. (top_face, right_face, bottom_face, left_face) = face_locs
  132. face_locs_processed = (top + height_person - right_face,left+bottom_face,top + height_person - left_face,left+top_face)
  133. else:
  134. (top_face, right_face, bottom_face, left_face) = face_locs
  135. person = cv2.rectangle(person, (width_person - bottom_face, left_face), (width_person - top_face, right_face), (0, 255, 0), 3)
  136. face_locs_processed = (top + left_face,left + width_person - top_face,top + right_face,left + width_person - bottom_face)
  137. people[index] = [0, face_locs_processed, name]
  138. else:
  139. face_locs = face_recognition.face_locations(person)[0]
  140. (top_face, right_face, bottom_face, left_face) = face_locs
  141. face_locs_processed = (top+face_locs[0],left+face_locs[1],top+face_locs[2],left+face_locs[3])
  142. name = find_name(person)
  143. people[index] = [1, face_locs_processed, name]
  144. _, buffer = cv2.imencode('.jpg', image_np)
  145. for i in range(len(output_dict_processed["detection_classes"])):
  146. box = output_dict_processed["detection_boxes"][i]
  147. output_dict_processed["detection_boxes"][i] = [box[1] * im_width, box[3] * im_width, box[0] * im_height, box[2] * im_height]
  148. output_dict_processed["detection_classes"][i] = category_index[output_dict_processed["detection_classes"][i]]["name"]
  149. return base64.b64encode(buffer).decode('ascii'), cars_involved, injured_people,output_dict_processed,people
  150. class Crash(Resource):
  151. def post(self):
  152. message = request.form['message']
  153. base64_img = request.form['img']
  154. id = request.form['id']
  155. lat, long = request.form['lat'], request.form['long']
  156. image, car_count, injured,out,people = process_img(base64_img)
  157. (top, right, bottom, left) = people[0][1]
  158. top = int(top)
  159. right = int(right)
  160. left = int(left)
  161. bottom = int(bottom)
  162. img = load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(base64_img))))
  163. cv2.rectangle(img,(left,top),(right,bottom),(0,255,0),3)
  164. cv2.imshow('test.jpg', img)
  165. cv2.waitKey(0)
  166. cv2.destroyAllWindows()
  167. print(people)
  168. priority = car_count + injured
  169. if priority > 10:
  170. priority = 10
  171. crash = {
  172. 'img': image,
  173. 'message': message,
  174. 'priority': priority,
  175. 'stats': {
  176. 'cars': car_count,
  177. 'injured': injured
  178. },
  179. 'location': {
  180. 'latitude': lat,
  181. 'longitude': long
  182. },
  183. "output_dict": out,
  184. "people":people
  185. }
  186. if id in crashes:
  187. crashes[id].append(crash)
  188. else:
  189. crashes[id] = [crash]
  190. with open(db_path, 'w') as f:
  191. json.dump(crashes, f, indent=2)
  192. return crash
  193. class Crashes(Resource):
  194. def post(self):
  195. process_dict = copy.deepcopy(crashes)
  196. return_dict = {}
  197. for id in process_dict:
  198. for i in range(len(process_dict[id])):
  199. del process_dict[id][i]["img"]
  200. for id in process_dict:
  201. for i in range(len(process_dict[id])):
  202. location = process_dict[id][i]['location']
  203. lat, lng = float(request.form['lat']), float(request.form['lng'])
  204. if abs(float(location['latitude']) - lat) < 0.3 and abs(float(location['longitude']) - lng) < 0.3:
  205. if id in return_dict:
  206. return_dict[id].append(process_dict[id][i])
  207. else:
  208. return_dict[id] = [process_dict[id][i]]
  209. return return_dict
  210. class Box:
  211. def __init__(self,coords, type,index):
  212. self.x1 = coords[0]
  213. self.y1 = coords[2]
  214. self.x2 = coords[1]
  215. self.y2 = coords[3]
  216. self.area = (self.x2-self.x1) * (self.y2-self.y1)
  217. self.type = type
  218. self.index = index
  219. def get_bigger(self,box):
  220. if box.type != self.type:
  221. return None
  222. left = max(box.x1, self.x1)
  223. right = min(box.x2, self.x2)
  224. bottom = max(box.y2, self.y2)
  225. top = min(box.y1, self.y1)
  226. if not left < right and bottom < top:
  227. return None
  228. area_temp = abs((right-left)*(top-bottom))
  229. if abs((right-left)*(top-bottom))/((box.area * (box.area < self.area)) + (self.area * (box.area > self.area))) < MIN_AREA_RATIO:
  230. return None
  231. if box.area > self.area:
  232. return box
  233. else:
  234. return self