diff --git a/server_side/api/modules/car_crash.py b/server_side/api/modules/car_crash.py index f62b07a..9f6ba84 100644 --- a/server_side/api/modules/car_crash.py +++ b/server_side/api/modules/car_crash.py @@ -51,8 +51,55 @@ if sys.platform == "win32": return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) -def process_img(img): - pass +def process_img(img_base64): + if sys.platform == "win32": + img = Image.open(io.BytesIO(base64.b64decode(img_base64))) + with detection_graph.as_default(): + with tf.Session(graph=detection_graph) as sess: + # Definite input and output Tensors for detection_graph + image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') + # Each box represents a part of the image where a particular object was detected. + detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') + # Each score represent how level of confidence for each of the objects. + # Score is shown on the result image, together with the class label. + detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') + detection_classes = detection_graph.get_tensor_by_name('detection_classes:0') + num_detections = detection_graph.get_tensor_by_name('num_detections:0') + # the array based representation of the image will be used later in order to prepare the + # result image with boxes and labels on it. + image_np = load_image_into_numpy_array(img) + # Expand dimensions since the model expects images to have shape: [1, None, None, 3] + image_np_expanded = np.expand_dims(image_np, axis=0) + # Actual detection. + (boxes, scores, classes, num) = sess.run( + [detection_boxes, detection_scores, detection_classes, num_detections], + feed_dict={image_tensor: image_np_expanded}) + # Visualization of the results of a detection. + vis_util.visualize_boxes_and_labels_on_image_array( + image_np, + np.squeeze(boxes), + np.squeeze(classes).astype(np.int32), + np.squeeze(scores), + category_index, + min_score_thresh=0.3, + use_normalized_coordinates=True, + line_thickness=8) + + output_dict = {'detection_classes': classes, 'detection_scores': scores[0], 'detection_boxes': boxes} + cars_involved = 0 + injured_people = 0 + for i in output_dict['detection_classes']: + index = np.where(output_dict['detection_classes'] == i)[0][0] + score = output_dict['detection_scores'][index] + if score > 0.3: + if output_dict['detection_classes'] == 1: + cars_involved += 1 + else: + pass + + return base64.b64encode(pickle.dumps(image_np)).decode('ascii'), cars_involved, injured_people + + return img_base64, 7, ["unprocessed"] class Crash(Resource): def post(self): @@ -60,7 +107,7 @@ class Crash(Resource): base64_img = request.form['img'] id = request.form['id'] - process_img(Image.open(io.BytesIO(base64.b64decode(base64_img)))) + process_img(base64_img) return id