You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

210 lines
7.6 KiB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
from modules import utils
from flask import Flask, request, Response
from flask_restful import Resource, Api
from PIL import Image
import cv2
import base64
import json
import sys
import os
import io

# Detection tuning constants.
MIN_AREA_RATIO = 0.1    # minimum intersection ratio used by Box.get_bigger
MIN_SCORE_THRESH = 0.6  # minimum detection confidence used by process_img

# TensorFlow and the object-detection helpers are only loaded on the
# Windows development machine; elsewhere the server runs without detection
# (see the sys.platform guards in process_img below).
if sys.platform == "win32":
    import tensorflow as tf
    import numpy as np
    import pickle
    # Hard-coded path to the local traffic_analyzer checkout so the
    # `from utils import ...` lines below resolve — TODO: make configurable.
    sys.path.insert(0, r'C:\Users\Tednokent01\Downloads\MyCity\traffic_analyzer')
    from utils import label_map_util
    from utils import visualization_utils as vis_util
app = Flask(__name__)
api = Api(app)

# Load the JSON "databases" into memory at import time; both files must
# already exist under <app root>/databases/.
db_path = os.path.join(app.root_path, 'databases', 'crashes.json')
with open(db_path, 'r') as f:
    # presumably maps reporter id -> list of crash dicts; verify against Crash.post
    crashes = json.load(f)

users_path = os.path.join(app.root_path, 'databases', 'users.json')
with open(users_path, 'r') as f:
    # NOTE(review): `users` is loaded but not referenced in this file — confirm use elsewhere.
    users = json.load(f)
# Windows only: load the frozen R-FCN COCO model into a tf.Graph once at
# startup; process_img() reuses this graph for every request.
if sys.platform == "win32":
    # PATH_TO_LABELS = '../../traffic_analyzer/object_detection/data/kitti_label_map.pbtxt'
    # PATH_TO_CKPT = 'modules/faster_rcnn_resnet101_kitti_2018_01_28/frozen_inference_graph.pb'
    PATH_TO_LABELS = '../../traffic_analyzer/object_detection/data/mscoco_label_map.pbtxt'
    PATH_TO_CKPT = '../../traffic_analyzer/rfcn_resnet101_coco_2018_01_28/frozen_inference_graph.pb'

    # Maps numeric class ids to human-readable labels for the visualizer.
    category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

    # Deserialize the frozen inference graph (TF1-style GraphDef).
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
  41. def load_image_into_numpy_array(image):
  42. (im_width, im_height) = image.size
  43. return np.array(image.getdata()).reshape(
  44. (im_height, im_width, 3)).astype(np.uint8)
  45. def process_img(img_base64):
  46. if sys.platform == "win32":
  47. img = Image.open(io.BytesIO(base64.b64decode(img_base64)))
  48. with detection_graph.as_default():
  49. with tf.Session(graph=detection_graph) as sess:
  50. # Definite input and output Tensors for detection_graph
  51. image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
  52. # Each box represents a part of the image where a particular object was detected.
  53. detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
  54. # Each score represent how level of confidence for each of the objects.
  55. # Score is shown on the result image, together with the class label.
  56. detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
  57. detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
  58. num_detections = detection_graph.get_tensor_by_name('num_detections:0')
  59. # the array based representation of the image will be used later in order to prepare the
  60. # result image with boxes and labels on it.
  61. image_np = load_image_into_numpy_array(img)
  62. # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
  63. image_np_expanded = np.expand_dims(image_np, axis=0)
  64. # Actual detection.
  65. (boxes, scores, classes, num) = sess.run(
  66. [detection_boxes, detection_scores, detection_classes, num_detections],
  67. feed_dict={image_tensor: image_np_expanded})
  68. # Visualization of the results of a detection.
  69. vis_util.visualize_boxes_and_labels_on_image_array(
  70. image_np,
  71. np.squeeze(boxes),
  72. np.squeeze(classes).astype(np.int32),
  73. np.squeeze(scores),
  74. category_index,
  75. min_score_thresh=MIN_SCORE_THRESH,
  76. use_normalized_coordinates=True,
  77. line_thickness=8)
  78. output_dict = {'detection_classes': np.squeeze(classes).astype(np.int32), 'detection_scores': np.squeeze(scores), 'detection_boxes': np.squeeze(boxes)}
  79. with open('image_1_data.pkl', 'wb') as f:
  80. pickle.dump(output_dict, f)
  81. cv2.imwrite('image_1.jpg', image_np)
  82. im_height, im_width, _ = image_np.shape
  83. cars_involved = 0
  84. injured_people = 0
  85. prev_cars = []
  86. for index, i in enumerate(output_dict['detection_classes']):
  87. score = output_dict['detection_scores'][index]
  88. if score > MIN_SCORE_THRESH:
  89. if i in [3, 6, 8]:
  90. box = output_dict['detection_boxes'][index]
  91. (left, right, top, bottom) = (box[1] * im_width, box[3] * im_width,
  92. box[0] * im_height, box[2] * im_height)
  93. avg_x = left+right/2
  94. avg_y = top+bottom/2
  95. same = False
  96. for prev_x, prev_y in prev_cars:
  97. if abs(prev_x-avg_x) < 130 and abs(prev_y-avg_y) < 130:
  98. same = True
  99. break
  100. if not same:
  101. cars_involved += 1
  102. prev_cars.append((avg_x, avg_y))
  103. elif i == 1:
  104. box = output_dict['detection_boxes'][index]
  105. (left, right, top, bottom) = (box[1] * im_width, box[3] * im_width,
  106. box[0] * im_height, box[2] * im_height)
  107. if right-left > bottom-top:
  108. injured_people += 1
  109. _, buffer = cv2.imencode('.jpg', image_np)
  110. return base64.b64encode(buffer).decode('ascii'), cars_involved, injured_people
  111. return img_base64, 7, ["unprocessed"]
  112. class Crash(Resource):
  113. def post(self):
  114. message = request.form['message']
  115. base64_img = request.form['img']
  116. id = request.form['id']
  117. lat, long = request.form['lat'], request.form['long']
  118. image, car_count, injured = process_img(base64_img)
  119. priority = car_count + injured
  120. if priority > 10:
  121. priority = 10
  122. crash = {
  123. 'img': image,
  124. 'message': message,
  125. 'priority': priority,
  126. 'stats': {
  127. 'cars': car_count,
  128. 'injured': injured
  129. },
  130. 'location': {
  131. 'latitude': lat,
  132. 'longitude': long
  133. }
  134. }
  135. if id in crashes:
  136. crashes[id].append(crash)
  137. else:
  138. crashes[id] = [crash]
  139. with open(db_path, 'w') as f:
  140. json.dump(crashes, f, indent=4)
  141. cv2.imshow("a",load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(image)))))
  142. cv2.waitKey(0)
  143. return crash
  144. class Crashes(Resource):
  145. def get(self):
  146. return crashes
  147. class Box:
  148. def __init__(self,coords, type):
  149. self.x1 = coords[0]
  150. self.y1 = coords[2]
  151. self.x2 = coords[1]
  152. self.y2 = coords[3]
  153. self.area = (self.x2-self.x1) * (self.y2-self.y1)
  154. self.type = type
  155. def get_bigger(self,box):
  156. if box.type == self.type:
  157. return None
  158. left = max(box.x1, self.x1)
  159. right = min(box.x2, self.x2)
  160. bottom = max(box.y2, self.y2)
  161. top = min(box.y1, self.y1)
  162. if not left < right and bottom < top:
  163. return None
  164. if ((box.area * (box.area < self.area)) + (self.area * (box.area > self.area))) / (right-left)*(top-bottom) < MIN_AREA_RATIO:
  165. return None
  166. if box.area > self.area:
  167. return box
  168. else:
  169. return self