@@ -13,10 +13,12 @@ import os
import io
import itertools
import pickle

from object_detection.utils import visualization_utils as vis_util
import copy
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import ssl
from object_detection.utils import label_map_util
import face_recognition

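# COCO label-map IDs counted as vehicles: 3 = car, 6 = bus, 8 = truck.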
VEHICLE_CLASSES = [3, 6, 8]
MIN_AREA_RATIO = 0.9

@@ -25,10 +27,12 @@ import numpy as np
MIN_SCORE_THRESH = 0.6

if sys.platform == "win32":
    import tensorflow as tf

    sys.path.insert(0, r'C:\Users\Tednokent01\Downloads\MyCity\traffic_analyzer')

PATH_TO_LABELS = os.path.join('object_detection/data', 'mscoco_label_map.pbtxt')

category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

app = Flask(__name__)
api = Api(app)

@@ -40,53 +44,39 @@ with open(db_path, 'r') as f:
users_path = os.path.join(app.root_path, 'databases', 'users.json')
with open(users_path, 'r') as f:
    users = json.load(f)

PATH_TO_LABELS = '../../traffic_analyzer/object_detection/data/mscoco_label_map.pbtxt'
PATH_TO_CKPT = '../../traffic_analyzer/rfcn_resnet101_coco_2018_01_28/frozen_inference_graph.pb'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

if sys.platform == "win32":
    # PATH_TO_LABELS = '../../traffic_analyzer/object_detection/data/kitti_label_map.pbtxt'
    # PATH_TO_CKPT = 'modules/faster_rcnn_resnet101_kitti_2018_01_28/frozen_inference_graph.pb'

    # Load the frozen detection graph into memory once at startup.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

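# Convert a PIL image into an (im_height, im_width, 3) uint8 numpy array.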
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)

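# The remote AI endpoint below presents a self-signed certificate, so TLS
# verification is disabled for those requests.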
context = ssl._create_unverified_context()

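# Match the first face found in `image` against the stored user encodings
# and return the best-matching user's real name, or "Unknown".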
def find_name(image):
    known_faces = []
    known_face_names = []
    for v in users.values():
        known_faces.append(np.array(v['face_encoding']))
        known_face_names.append(v['realname'])

    encodings = face_recognition.face_encodings(image)
    if not encodings:
        # No detectable face in this crop.
        return "Unknown"
    face_encoding = encodings[0]
    results = face_recognition.compare_faces(known_faces, face_encoding)
    name = "Unknown"
    face_distances = face_recognition.face_distance(known_faces, face_encoding)
    best_match_index = np.argmin(face_distances)
    if results[best_match_index]:
        name = known_face_names[best_match_index]

    return name

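# Run object detection on the decoded base64 image (locally, via the remote
# AI service, or from a cached pickle) and derive the crash statistics.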
def process_img(img_base64):
    if sys.platform == "win32":
        img = Image.open(io.BytesIO(base64.b64decode(img_base64)))
        with detection_graph.as_default():
            with tf.Session(graph=detection_graph) as sess:
                # Definite input and output tensors for detection_graph.
                image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                # Each box represents a part of the image where a particular object was detected.
                detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                # Each score represents the level of confidence for each of the objects.
                # The score is shown on the result image together with the class label.
                detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
                detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                # The array-based representation of the image will be used later to
                # prepare the result image with boxes and labels on it.
                image_np = load_image_into_numpy_array(img)
                # Expand dimensions since the model expects images to have shape [1, None, None, 3].
                image_np_expanded = np.expand_dims(image_np, axis=0)
                # Actual detection.
                (boxes, scores, classes, num) = sess.run(
                    [detection_boxes, detection_scores, detection_classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})

                output_dict = {'detection_classes': np.squeeze(classes).astype(np.int32),
                               'detection_scores': np.squeeze(scores),
                               'detection_boxes': np.squeeze(boxes)}

    # Debug toggle: when true, offload detection to the remote AI service;
    # otherwise load cached detections from a pickle file.
    if 1:
        url = 'https://10.10.26.161:5000/ai'  # Destination URL of the remote AI service
        post_fields = {'img': img_base64, "type": "coco"}  # POST fields
        request = Request(url, urlencode(post_fields).encode())
        data = urlopen(request, context=context).read().decode("ascii")
        # The service returns a JSON document encoded as a JSON string, hence the double decode.
        output_dict = json.loads(json.loads(data))
        image_np = load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(img_base64))))
    else:
        with open('image_1_data.pkl', 'rb') as f:
            output_dict = pickle.load(f)

@@ -121,10 +111,7 @@ def process_img(img_base64):
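            # Carry over the scores and boxes of the detections kept above;
            # box.index points back into the original output_dict arrays.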
            output_dict_processed["detection_scores"].append(output_dict["detection_scores"][box.index])
            output_dict_processed["detection_boxes"].append(output_dict["detection_boxes"][box.index])

    output_dict_processed["detection_classes"] = np.array(output_dict_processed["detection_classes"])
    output_dict_processed["detection_scores"] = np.array(output_dict_processed["detection_scores"])
    output_dict_processed["detection_boxes"] = np.array(output_dict_processed["detection_boxes"])

    people = {}
    for index, i in enumerate(output_dict['detection_classes']):
        score = output_dict['detection_scores'][index]
        if score > MIN_SCORE_THRESH:
@@ -146,22 +133,36 @@ def process_img(img_base64):
            box = output_dict['detection_boxes'][index]
            (left, right, top, bottom) = (box[1] * im_width, box[3] * im_width,
                                          box[0] * im_height, box[2] * im_height)
            # Box coordinates are floats; slice indices must be ints.
            person = image_np[int(top):int(bottom), int(left):int(right)]

            # Heuristic: a box wider than it is tall suggests the person is
            # lying down, so count them as injured. Each entry in `people` is
            # [status, face locations, name], status 0 = lying, 1 = upright.
            if right - left > bottom - top:
                injured_people += 1
                face_locs = face_recognition.face_locations(person)
                name = find_name(person)
                people[index] = [0, face_locs, name]
            else:
                face_locs = face_recognition.face_locations(person)
                name = find_name(person)
                people[index] = [1, face_locs, name]

    _, buffer = cv2.imencode('.jpg', image_np)
    image_process = image_np[:]
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_process,
        output_dict_processed["detection_boxes"],
        output_dict_processed["detection_classes"],
        output_dict_processed["detection_scores"],
        category_index,
        min_score_thresh=MIN_SCORE_THRESH,
        use_normalized_coordinates=True,
        line_thickness=8)
    cv2.imshow("a", image_process)
    cv2.waitKey(0)
    return base64.b64encode(buffer).decode('ascii'), cars_involved, injured_people
    # image_process = image_np[:]
    # vis_util.visualize_boxes_and_labels_on_image_array(
    #     image_process,
    #     output_dict_processed["detection_boxes"],
    #     output_dict_processed["detection_classes"],
    #     output_dict_processed["detection_scores"],
    #     category_index,
    #     min_score_thresh=MIN_SCORE_THRESH,
    #     use_normalized_coordinates=True,
    #     line_thickness=8)
    # cv2.imshow("a", image_process)
    # cv2.waitKey(0)

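    # Replace the numeric class IDs with human-readable names so that the
    # response and the JSON database store labels rather than COCO indices.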
    output_dict_processed["detection_classes"] = [
        category_index[c]["name"] for c in output_dict_processed["detection_classes"]]

    return base64.b64encode(buffer).decode('ascii'), cars_involved, injured_people, output_dict_processed, people


class Crash(Resource):
@@ -171,7 +172,8 @@ class Crash(Resource):
|
id = request.form['id'] |
|
|
|
lat, long = request.form['lat'], request.form['long'] |
|
|
|
|
|
|
|
image, car_count, injured = process_img(base64_img) |
|
|
|
image, car_count, injured,out,people = process_img(base64_img) |
|
|
|
print(people) |
|
|
|
priority = car_count + injured |
|
|
|
if priority > 10: |
|
|
|
priority = 10 |
|
|
@@ -187,7 +189,8 @@ class Crash(Resource):
                'location': {
                    'latitude': lat,
                    'longitude': long
                }
            },
            "output_dict": out
        }
        if id in crashes:
            crashes[id].append(crash)
@@ -196,16 +199,28 @@ class Crash(Resource):
        with open(db_path, 'w') as f:
            json.dump(crashes, f, indent=4)

        # Debug: display the image that was just stored.
        cv2.imshow("a", load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(image)))))
        cv2.waitKey(0)

        return crash


class Crashes(Resource):
    def get(self):
        return crashes

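    # Return only crashes within 0.3 degrees (roughly 33 km of latitude) of
    # the client-supplied coordinates, with the base64 images stripped out.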
    def post(self):
        process_dict = copy.deepcopy(crashes)
        return_dict = {}
        for id in process_dict:
            for i in range(len(process_dict[id])):
                del process_dict[id][i]["img"]

        # Parse the query coordinates once instead of on every iteration.
        lat, lng = float(request.form['lat']), float(request.form['lng'])
        for id in process_dict:
            for i in range(len(process_dict[id])):
                location = process_dict[id][i]['location']
                if abs(float(location['latitude']) - lat) < 0.3 and abs(float(location['longitude']) - lng) < 0.3:
                    if id in return_dict:
                        return_dict[id].append(process_dict[id][i])
                    else:
                        return_dict[id] = [process_dict[id][i]]

        return return_dict

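# Helper pairing a detection box's coordinates and class with its index in
# the original detection arrays.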
class Box:
    def __init__(self, coords, type, index):