
Merge branch 'efe' into yigit

Branch: yigit
Yiğit Çolakoğlu, 6 years ago
Parent commit: 7f1f583237
11 changed files with 780 additions and 270 deletions
 1. +18  -9    MyCity/app/src/main/java/gq/yigit/mycity/CrashMapFragment.java
 2. +18  -2    client_side/app.py
 3. +311 -0    client_side/interface/UserData/crashes.html
 4. +1   -0    server_side/api/app.py
 5. BIN        server_side/api/images/9vard12ty0ad2yvwp3q53rsf3h43r2vq.png
 6. +133 -0    server_side/api/modules/ai_services.py
 7. +91  -76   server_side/api/modules/car_crash.py
 8. +33  -48   server_side/api/modules/complaint.py
 9. +1   -134  server_side/api/modules/databases/crashes.json
10. +138 -1    server_side/api/modules/databases/users.json
11. +36  -0    server_side/api/modules/user_set.py

+18 -9  MyCity/app/src/main/java/gq/yigit/mycity/CrashMapFragment.java

@@ -27,7 +27,7 @@ import org.json.JSONException;
import org.json.JSONObject;
-public class CrashMapFragment extends Fragment implements WebRequest.responseListener {
+public class CrashMapFragment extends Fragment implements WebRequest.responseListener, GoogleMap.OnCameraIdleListener {
// TODO: Rename parameter arguments, choose names that match
// the fragment initialization parameters, e.g. ARG_ITEM_NUMBER
private static final String ARG_PARAM1 = "param1";
@@ -88,6 +88,9 @@ public class CrashMapFragment extends Fragment implements WebRequest.responseLis
mMapView.onResume();
activity = this;
FileActions file_manager = new FileActions();
url = file_manager.readFromFile(getContext(),"server.config").trim();
try {
MapsInitializer.initialize(getActivity().getApplicationContext());
} catch (Exception e) {
@@ -106,18 +109,11 @@ public class CrashMapFragment extends Fragment implements WebRequest.responseLis
googleMap.setMyLocationEnabled(true);
}
}
-markerPoints = new ArrayList<LatLng>();
googleMap.getUiSettings().setCompassEnabled(true);
googleMap.getUiSettings().setMyLocationButtonEnabled(true);
googleMap.getUiSettings().setRotateGesturesEnabled(true);
-FileActions file_manager = new FileActions();
-url = file_manager.readFromFile(getContext(),"server.config").trim();
-String url_crashes = url + "/crashes";
-WebRequest data_request = new WebRequest(url_crashes,true,new HashMap<String, String>(),0);
-data_request.addListener(activity);
-data_request.execute();
+googleMap.setOnCameraIdleListener(activity);
CameraPosition cameraPosition = new CameraPosition.Builder().target(new LatLng(39.925533,32.866287)).zoom(13).build();
googleMap.animateCamera(CameraUpdateFactory.newCameraPosition(cameraPosition));
@@ -127,8 +123,21 @@
return rootView;
}
public void onCameraIdle() {
LatLng center = googleMap.getCameraPosition().target;
String url_crashes = url + "/crashes";
HashMap<String,String> params = new HashMap<>();
params.put("lat", String.valueOf(center.latitude));
params.put("lng", String.valueOf(center.longitude));
WebRequest data_request = new WebRequest(url_crashes,false,params,0);
data_request.addListener(this);
data_request.execute();
}
public void receivedResponse(boolean success,String response, int reqid){
if(success){
markerPoints = new ArrayList<LatLng>();
googleMap.clear();
try{
JSONObject crashes = new JSONObject(response);
Iterator<String> iter = crashes.keys();
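
Note: the net effect of this Java change is that the fragment no longer fetches every crash once at startup; it re-queries the server each time the map camera settles, sending the current map center so the server can filter by distance. A minimal sketch of the equivalent request in Python (the server URL is a placeholder; the real app reads it from server.config, and the lat/lng form fields match what onCameraIdle sends):

    import requests

    SERVER = "https://example.com:5000"  # placeholder; the app reads its URL from server.config

    def fetch_nearby_crashes(lat, lng):
        # Mirrors CrashMapFragment.onCameraIdle(): POST the map center and
        # receive only the crashes the server considers nearby.
        resp = requests.post(
            f"{SERVER}/crashes",
            data={"lat": str(lat), "lng": str(lng)},
            verify=False,  # assumption: the project runs with a self-signed certificate
        )
        resp.raise_for_status()
        return resp.json()

    crashes = fetch_nearby_crashes(39.925533, 32.866287)  # Ankara, the map's start position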


+18 -2  client_side/app.py

@@ -7,7 +7,7 @@ app = Flask(__name__)
app.config['SECRET_KEY'] = 'yigit007'
socketio = SocketIO(app)
-src_path , file_list = "../server_side/api/modules/databases/",["denunciations","complaints"]
+src_path , file_list = "../server_side/api/modules/databases/",["denunciations","complaints","crashes"]
changed = {}
for file in file_list:
changed[file] = os.stat(os.path.join(src_path,file+".json"))
@@ -42,7 +42,7 @@ def handle_my_custom_namespace_event():
@socketio.on("check",namespace="/complaints_socket")
-def denunciation_handle(msg):
+def compaint_handle(msg):
change,data = file_check("complaints")
if change:
@@ -59,6 +59,22 @@ def handle_my_custom_namespace_event():
emit("new", json.dumps(json_data), namespace="/complaints_socket")
@socketio.on('check', namespace='/crashes_socket')
def crash_handle(msg):
change, data = file_check("crashes")
if change:
emit("new", data, namespace='/crashes_socket')
@socketio.on('connect', namespace='/crashes_socket')
def handle_crash():
print("[INFO]: Received socket connection!")
src = os.path.join(src_path, 'crashes.json')
with open(src, 'r') as f:
json_data = json.loads(f.read())
emit("new", json.dumps(json_data), namespace="/crashes_socket")
@app.route('/gui/<path:path>')
def send_img(path):
return send_from_directory('interface/UserData', path)
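
Note: the handshake these handlers implement is: a client connects to the /crashes_socket namespace, immediately receives the full crashes.json payload, then emits "check" once a second and gets a fresh "new" event whenever the file changes on disk. A small sketch of the same flow from a python-socketio client (host and port are assumptions; the dashboard's console.log suggests port 4000):

    import time

    import socketio  # pip install "python-socketio[client]"

    sio = socketio.Client()

    @sio.on("new", namespace="/crashes_socket")
    def on_new(data):
        # The server sends the serialized crashes database on connect and
        # again whenever file_check() notices crashes.json has changed.
        print("crash update:", data[:60], "...")

    sio.connect("http://localhost:4000", namespaces=["/crashes_socket"])
    while True:
        sio.emit("check", {}, namespace="/crashes_socket")  # same 1-second poll as the web client
        time.sleep(1)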


+311 -0  client_side/interface/UserData/crashes.html

@@ -0,0 +1,311 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Crashes</title>
<meta name="viewport" content="initial-scale=1.0">
<meta charset="utf-8">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<script src="socket.io/node_modules/socket.io-client/dist/socket.io.js"></script>
<style>
#map {
height: 100%;
}
html, body {
height: 100%;
margin: 0;
padding: 0;
}
</style>
<style>
#img:hover{
color: #424242;
-webkit-transition: all .3s ease-in;
-moz-transition: all .3s ease-in;
-ms-transition: all .3s ease-in;
-o-transition: all .3s ease-in;
transition: all .3s ease-in;
opacity: 1;
transform: scale(1.15);
-ms-transform: scale(1.15); /* IE 9 */
-webkit-transform: scale(1.15); /* Safari and Chrome */
max-width: 400px;
max-height: 400px;
}
#img-user{
max-width: 150px;
max-height: 150px;
}
.title{
text-align: left;
}
td{
padding: 20px 10px;
}
.popup{
display: none; /* Hidden by default */
position: fixed; /* Stay in place */
left: 0;
top: 0;
width: 100%; /* Full width */
height: 100%; /* Full height */
overflow: auto; /* Enable scroll if needed */
background-color: rgb(0,0,0); /* Fallback color */
background-color: rgba(0,0,0,0.4); /* Black w/ opacity */
padding-top: 60px;
}
#table,#table-user{
background: white;
padding: 40px 60px;
}
#content, #user-content{
display: inline-block;
margin-top: 125px;
background: white;
}
#submit{
top: 50%;
transform: translateY(-50%);
}
</style>
</head>
<body>
<audio id="audio" src="http://www.soundjay.com/button/beep-07.wav"></audio>
<div id="map"></div>
<!-- DAMN YOU, JAVASCRIPT
What a needlessly awful language you are, you ate up an entire night of mine.
Curse whoever wrote you.
THE SAME GOES FOR SOCKET.IO. GOOD GOD, I'M LOSING MY MIND. ITS AUTHORS MUST HAVE SET OUT THINKING "LET'S SEE HOW BROKEN A FRAMEWORK WE CAN WRITE." -->
<div id = "popup" align = "center" class = "popup">
<div id = "content">
<form action="." id="submission_form">
<table border="0" cellpadding="10" cellspacing="0" id = "table" class = "modal">
<tr>
<td rowspan="2" colspan="7"><img src="cityscape.png" alt="img"id = "img"></td>
<td colspan="2" class="title"><strong>Priority: </strong></td>
<td colspan="1" id = "priority">7</td>
</tr>
<tr>
<td colspan="2" class="title"><strong>Type: </strong></td>
<td colspan="1" id = "type"> Ambulance</td>
</tr>
<tr>
<td colspan="4" class="title"><strong>Submitter: </strong></td>
<td colspan="6">
<a href="#" id = "user" class = "panel" onclick=
"document.getElementById('popup').style.display='none';
document.getElementById('user-popup').style.display='block';
setUser(this.getAttribute('userid'))"
>Efe Aydın </a>
</td>
</tr>
<tr>
<td colspan="4" class="title"><strong>Message: </strong></td>
<td colspan="6" id = "message">Test ambulance</td>
</tr>
<tr>
<td colspan="4" class="title"><strong>Tags: </strong></td>
<td colspan="6" id = "tags">Test tags</td>
</tr>
<tr>
<td colspan="4" class="title"><strong>Message: </strong></td>
<td colspan="6" id = "input"> <textarea name="message" rows="5" cols="30" id="response">Please enter response.</textarea>
</td>
</tr>
<tr>
<td colspan="10">
<input type="submit" id = "submit">
</td>
</tr>
</table>
</form>
</div>
</div>
<div id = "user-popup" align = "center" class="popup">
<div id = "user-content">
<table border="0" cellpadding="10" cellspacing="0" id = "table-user" class = "modal">
<tr>
<td rowspan="3" colspan="7"><img src="cityscape.png" alt="img"id = "img-user"></td>
<td colspan="2" class="title"><strong>Name: </strong></td>
<td colspan="1" id = "name">Efe Aydın</td>
</tr>
<tr>
<td colspan="2" class="title"><strong>TC: </strong></td>
<td colspan="1" id = "tc">111111111111</td>
</tr>
<tr>
<td colspan="2" class="title"><strong>Trust: </strong></td>
<td colspan="1" id = "trust">7</td>
</tr>
<tr>
<td colspan="4" class="title"><strong>Phone: </strong></td>
<td colspan="6" id="phone">
0 555 555 55 55
</td>
</tr>
<tr>
<td colspan="4" class="title"><strong>E-mail: </strong></td>
<td colspan="6" id = "email">efeaydin@citizenview.com</td>
</tr>
<tr>
<td colspan="4" class="title"><strong>Health: </strong></td>
<td colspan="6" id = "health">Test tags</td>
</tr>
</table>
<button onclick=
"document.getElementById('popup').style.display='block';
document.getElementById('user-popup').style.display='none';"
style="margin-bottom: 5px;">OK</button>
</div>
</div>
<script>
function setUser(id) {
if(!(id in users)) {
$.get("https://" + document.domain + ":5000/users/" + id, function (dataStr, status) {
data_user = $.parseJSON(dataStr)
users[id]=data_user
document.getElementById("name").innerHTML = users[id]["realname"];
document.getElementById("tc").innerHTML = users[id]["TC"];
document.getElementById("trust").innerHTML = users[id]["trustability"];
document.getElementById("phone").innerHTML = users[id]["tel"];
document.getElementById("email").innerHTML = users[id]["email"];
document.getElementById("img-user").setAttribute("src","https://" + document.domain + ":5000"+users[id]["avatar"])
});
}else {
document.getElementById("name").innerHTML = users[id]["realname"];
document.getElementById("tc").innerHTML = users[id]["TC"];
document.getElementById("trust").innerHTML = users[id]["trustability"];
document.getElementById("phone").innerHTML = users[id]["tel"];
document.getElementById("email").innerHTML = users[id]["email"];
document.getElementById("img-user").setAttribute("src","https://" + document.domain + ":5000" + users[id]["avatar"])
}
}
</script>
<script>
var modal = document.getElementById('popup');
window.onclick = function(event) {
if (event.target == modal) {
modal.style.display = "none";
}
}
</script>
<script>
var map;
function initMap() {
map = new google.maps.Map(document.getElementById('map'), {
center: {lat: 39.92, lng: 32.85},
zoom: 13
});
}
</script>
<script>
var sound = document.getElementById("audio");
var markers = [];
var socket = io.connect('http://' + document.domain + ':' + location.port + "/crashes_socket");
console.log('http://' + document.domain + ":4000/crashes_socket")
console.log(socket);
var denunciation_count = markers.length;
var users = {};
setInterval(function () {
socket.emit("check",{})
},1000);
var LatLng = null;
socket.on("new",function (dataStr) {
for (var i = 0; i < markers.length; i++) {
markers.pop().setMap(null);
}
console.log("Update received!")
var data = $.parseJSON(dataStr);
for(var key in data) {
console.log("aaa aaa")
data[key].forEach(function (element,index) {
console.log("aaa bbb")
if(element["location"]["longitude"] === undefined || element["location"]["latitude"] === undefined){
return;
}
console.log("aaa ccc")
LatLng = {lat: parseFloat(element["location"]["latitude"]), lng: parseFloat(element["location"]["longitude"])};
console.log("aaa ddd")
var marker = new google.maps.Marker({
position: {lat: parseFloat(element["location"]["latitude"]), lng: parseFloat(element["location"]["longitude"])},
map: map,
label: element["priority"].toString(),
title: element["message"]
});
console.log("aaa eee")
marker.addListener('click', function () {
document.getElementById('popup').style.display = 'block';
document.getElementById("priority").innerHTML = element["response"]["priority"];
if(element["response"]["status"]){
document.getElementById("response").innerHTML = element["response"]["message"];
}
document.getElementById("submission_form").setAttribute("index",index);
document.getElementById("submission_form").setAttribute("id",key);
document.getElementById("type").innerHTML = "Road Damage";
document.getElementById("message").innerHTML = element["content"];
if (!(key in users)) {
$.get("https://" + document.domain + ":5000/users/" + key, function (dataStr, status) {
data_user = $.parseJSON(dataStr);
users[key] = data_user;
document.getElementById("user").innerHTML = users[key]["realname"];
});
} else {
document.getElementById("user").innerHTML = users[key]["realname"];
}
document.getElementById("user").setAttribute("userid", key);
document.getElementById("img").setAttribute("src", "data:image/png;base64," + element["img"])
});
});
}
map.setCenter(LatLng);
if (denunciation_count < markers.length) {
sound.play();
}
denunciation_count = markers.length;
});
</script>
<script src="https://maps.googleapis.com/maps/api/js?key=AIzaSyBuOC03IHPA_6TPnfk18b0SAgD1uge4-dk&callback=initMap" async defer></script>
</body>
</html>

+1 -0  server_side/api/app.py

@@ -4,6 +4,7 @@ from flask_cors import CORS, cross_origin
from modules import user_info, voting_system, rating_system, denunciation, navigation, bus_stops, announcements, complaint, car_crash
from modules import utility
from modules import user_set
app = Flask(__name__)
api = Api(app)


BIN  server_side/api/images/9vard12ty0ad2yvwp3q53rsf3h43r2vq.png
Width: 512 | Height: 512 | Size: 16 KiB

+133 -0  server_side/api/modules/ai_services.py

@@ -0,0 +1,133 @@
from flask import Flask, request, Response
from flask_restful import Resource, Api
import os
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import ops as utils_ops
from PIL import Image
import base64
import io
import json
import tensorflow as tf
import sys
import numpy as np
switches = {"coco":1, "damage":1}
COCO_MODEL_NAME = "rfcn_resnet101_coco_2018_01_28"
PATH_TO_FROZEN_COCO_GRAPH = 'modules/'+COCO_MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_FROZEN_DAMAGE_GRAPH = 'modules/trainedModels/ssd_mobilenet_RoadDamageDetector.pb'
if sys.platform == "win32":
detection_graph_coco = tf.Graph()
detection_graph_damage = tf.Graph()
with detection_graph_coco.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_COCO_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
with detection_graph_damage.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_DAMAGE_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def run_inference_for_single_image(image, graph,type):
global switches
global sess_coco
global sess_damage
with graph.as_default():
if(switches[type]):
if type == "coco":
sess_coco = tf.Session()
elif type == "damage":
sess_damage = tf.Session()
switches[type] = 0
if type == "coco":
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for a single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[1], image.shape[2])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess_coco.run(tensor_dict,
feed_dict={image_tensor: image})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.int64)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
elif type=="damage":
image_tensor = graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = graph.get_tensor_by_name('detection_boxes:0')
# Each score represents the confidence level for each detected object.
# The score is shown on the result image, together with the class label.
detection_scores = graph.get_tensor_by_name('detection_scores:0')
detection_classes = graph.get_tensor_by_name('detection_classes:0')
num_detections = graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num) = sess_damage.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image})
output_dict = {'detection_classes': np.squeeze(classes).astype(np.int32), 'detection_scores': np.squeeze(scores)}
return output_dict
class Process(Resource):
def post(self):
base64_img = request.form['img']
image = Image.open(io.BytesIO(base64.b64decode(base64_img)))
type = request.form["type"]
image_np = load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image_np, axis=0)
if type == "coco":
output_dict = run_inference_for_single_image(image_np_expanded, detection_graph_coco,type)
elif type == "damage":
output_dict = run_inference_for_single_image(image_np_expanded, detection_graph_damage,type)
return json.dumps(output_dict,cls=NumpyEncoder)
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
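
Note the serialization quirk in Process.post: it returns json.dumps(output_dict, cls=NumpyEncoder), and Flask-RESTful then JSON-encodes that string a second time, which is why callers elsewhere in this commit decode twice. A hedged sketch of a client call, assuming the Process resource is mounted at /ai as those callers expect:

    import base64
    import json
    import ssl
    from urllib.parse import urlencode
    from urllib.request import Request, urlopen

    context = ssl._create_unverified_context()  # the services use self-signed certificates

    with open("crash.jpg", "rb") as f:  # any test image
        img_b64 = base64.b64encode(f.read()).decode("ascii")

    req = Request("https://127.0.0.1:5000/ai", urlencode({"img": img_b64, "type": "coco"}).encode())
    raw = urlopen(req, context=context).read().decode("ascii")
    output_dict = json.loads(json.loads(raw))  # double decode; see note above
    print(output_dict["num_detections"], "objects detected")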

+91 -76  server_side/api/modules/car_crash.py

@@ -13,10 +13,12 @@ import os
import io
import itertools
import pickle
from object_detection.utils import visualization_utils as vis_util
import copy
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import ssl
from object_detection.utils import label_map_util
import face_recognition
VEHICLE_CLASSES = [3, 6, 8]
MIN_AREA_RATIO = 0.9
@@ -25,10 +27,12 @@ import numpy as np
MIN_SCORE_THRESH = 0.6
if sys.platform == "win32":
import tensorflow as tf
sys.path.insert(0, r'C:\Users\Tednokent01\Downloads\MyCity\traffic_analyzer')
PATH_TO_LABELS = os.path.join('object_detection/data', 'mscoco_label_map.pbtxt')
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
app = Flask(__name__)
api = Api(app)
@@ -40,53 +44,39 @@ with open(db_path, 'r') as f:
users_path = os.path.join(app.root_path, 'databases', 'users.json')
with open(users_path, 'r') as f:
users = json.load(f)
PATH_TO_LABELS = '../../traffic_analyzer/object_detection/data/mscoco_label_map.pbtxt'
PATH_TO_CKPT = '../../traffic_analyzer/rfcn_resnet101_coco_2018_01_28/frozen_inference_graph.pb'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
if sys.platform == "win32":
# PATH_TO_LABELS = '../../traffic_analyzer/object_detection/data/kitti_label_map.pbtxt'
# PATH_TO_CKPT = 'modules/faster_rcnn_resnet101_kitti_2018_01_28/frozen_inference_graph.pb'
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
context = ssl._create_unverified_context()
def find_name(image):
known_faces = []
known_face_names = []
for v in users.values():
known_faces.append(np.array(v['face_encoding']))
known_face_names.append(v['realname'])
face_encoding = face_recognition.face_encodings(image)[0]
results = face_recognition.compare_faces(known_faces, face_encoding)
name = "Unknown"
face_distances = face_recognition.face_distance(known_faces, face_encoding)
best_match_index = np.argmin(face_distances)
if results[best_match_index]:
name = known_face_names[best_match_index]
return name
def process_img(img_base64):
if sys.platform == "win32":
img = Image.open(io.BytesIO(base64.b64decode(img_base64)))
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(img)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
output_dict = {'detection_classes': np.squeeze(classes).astype(np.int32), 'detection_scores': np.squeeze(scores), 'detection_boxes': np.squeeze(boxes)}
if 1:
url = 'https://10.10.26.161:5000/ai' # Set destination URL here
post_fields = {'img': img_base64,"type":"coco"} # Set POST fields here
request = Request(url, urlencode(post_fields).encode())
data = urlopen(request, context=context).read().decode("ascii")
output_dict = json.loads(json.loads(data))
image_np = load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(img_base64))))
else:
with open('image_1_data.pkl', 'rb') as f:
output_dict = pickle.load(f)
@@ -121,10 +111,7 @@ def process_img(img_base64):
output_dict_processed["detection_scores"].append(output_dict["detection_scores"][box.index])
output_dict_processed["detection_boxes"].append(output_dict["detection_boxes"][box.index])
output_dict_processed["detection_classes"] = np.array( output_dict_processed["detection_classes"])
output_dict_processed["detection_scores"] = np.array(output_dict_processed["detection_scores"])
output_dict_processed["detection_boxes"] = np.array(output_dict_processed["detection_boxes"])
people = {}
for index, i in enumerate(output_dict['detection_classes']):
score = output_dict['detection_scores'][index]
if score > MIN_SCORE_THRESH:
@@ -146,22 +133,36 @@
box = output_dict['detection_boxes'][index]
(left, right, top, bottom) = (box[1] * im_width, box[3] * im_width,
box[0] * im_height, box[2] * im_height)
person = image_np[top:bottom,left:right]
if right-left > bottom-top:
injured_people += 1
face_locs = face_recognition.face_locations(person)
name = find_name(person)
people[index] = [0, face_locs, name]
else:
face_locs = face_recognition.face_locations(person)
name = find_name(person)
people[index] = [1, face_locs, name]
_, buffer = cv2.imencode('.jpg', image_np)
image_process = image_np[:]
vis_util.visualize_boxes_and_labels_on_image_array(
image_process,
output_dict_processed["detection_boxes"],
output_dict_processed["detection_classes"],
output_dict_processed["detection_scores"],
category_index,
min_score_thresh=MIN_SCORE_THRESH,
use_normalized_coordinates=True,
line_thickness=8)
cv2.imshow("a",image_process)
cv2.waitKey(0)
return base64.b64encode(buffer).decode('ascii'), cars_involved, injured_people
# image_process = image_np[:]
# vis_util.visualize_boxes_and_labels_on_image_array(
# image_process,
# output_dict_processed["detection_boxes"],
# output_dict_processed["detection_classes"],
# output_dict_processed["detection_scores"],
# category_index,
# min_score_thresh=MIN_SCORE_THRESH,
# use_normalized_coordinates=True,
# line_thickness=8)
# cv2.imshow("a",image_process)
# cv2.waitKey(0)
for i in range(len(output_dict_processed["detection_classes"])):
output_dict_processed["detection_classes"][i] = category_index[output_dict_processed["detection_classes"][i]]["name"]
return base64.b64encode(buffer).decode('ascii'), cars_involved, injured_people,output_dict_processed,people
class Crash(Resource):
@@ -171,7 +172,8 @@
id = request.form['id']
lat, long = request.form['lat'], request.form['long']
-image, car_count, injured = process_img(base64_img)
+image, car_count, injured,out,people = process_img(base64_img)
+print(people)
priority = car_count + injured
if priority > 10:
priority = 10
@@ -187,7 +189,8 @@
'location': {
'latitude': lat,
'longitude': long
-}
+},
+"output_dict": out
}
if id in crashes:
crashes[id].append(crash)
@@ -196,16 +199,28 @@
with open(db_path, 'w') as f:
json.dump(crashes, f, indent=4)
cv2.imshow("a",load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(image)))))
cv2.waitKey(0)
return crash
class Crashes(Resource):
def get(self):
return crashes
def post(self):
process_dict = copy.deepcopy(crashes)
return_dict = {}
for id in process_dict:
for i in range(len(process_dict[id])):
del process_dict[id][i]["img"]
for id in process_dict:
for i in range(len(process_dict[id])):
location = process_dict[id][i]['location']
lat, lng = float(request.form['lat']), float(request.form['lng'])
if abs(float(location['latitude']) - lat) < 0.3 and abs(float(location['longitude']) - lng) < 0.3:
if id in return_dict:
return_dict[id].append(process_dict[id][i])
else:
return_dict[id] = [process_dict[id][i]]
return return_dict
class Box:
def __init__(self,coords, type,index):


+33 -48  server_side/api/modules/complaint.py

@@ -8,6 +8,10 @@ from PIL import Image
import sys
import datetime
import cv2
import ssl
from urllib.parse import urlencode
from urllib.request import Request, urlopen
if sys.platform == "win32":
import tensorflow as tf
@@ -15,13 +19,15 @@ if sys.platform == "win32":
import pickle
sys.path.insert(0, r'C:\Users\Tednokent01\Downloads\MyCity\traffic_analyzer')
-from utils import label_map_util
+from object_detection.utils import label_map_util
-from utils import visualization_utils as vis_util
+from object_detection.utils import visualization_utils as vis_util
app = Flask(__name__)
api = Api(app)
context = ssl._create_unverified_context()
score_dict = {
1: 1,
2: 1,
@@ -38,21 +44,11 @@ with open("modules/databases/complaints.json","r") as f:
if sys.platform == "win32":
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = 'modules/trainedModels/ssd_mobilenet_RoadDamageDetector.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = 'modules/trainedModels/crack_label_map.pbtxt'
NUM_CLASSES = 8
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
@@ -65,39 +61,26 @@ def load_image_into_numpy_array(image):
def process_img(img_base64):
if sys.platform == "win32":
img = Image.open(io.BytesIO(base64.b64decode(img_base64)))
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(img)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
min_score_thresh=0.3,
use_normalized_coordinates=True,
line_thickness=8)
output_dict = {'detection_classes': np.squeeze(classes).astype(np.int32), 'detection_scores': np.squeeze(scores)}
url = 'https://127.0.0.1:5000/ai' # Set destination URL here
post_fields = {'img': img_base64,"type":"damage"} # Set POST fields here
request = Request(url, urlencode(post_fields).encode())
img = load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(img_base64))))
output_dict = json.loads(json.loads(urlopen(request, context=context).read()))
print(output_dict)
vis_util.visualize_boxes_and_labels_on_image_array(
img,
np.array(output_dict['detection_boxes']),
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.3
)
defects = []
for index, i in enumerate(output_dict['detection_classes']):
score = output_dict['detection_scores'][index]
@@ -111,8 +94,11 @@ def process_img(img_base64):
if priority > 10:
priority = 10
-_, buffer = cv2.imencode('.jpg', image_np)
-return base64.b64encode(buffer).decode('ascii'),priority,defects
+buffered = io.BytesIO()
+img = Image.fromarray(img, 'RGB')
+img.save(buffered, format="JPEG")
+img_str = base64.b64encode(buffered.getvalue())
+return img_str.decode("ascii"),priority,defects
return img_base64, 7,["unprocessed"]
@@ -155,4 +141,3 @@ class ComplaintsUpdate(Resource):
with open('modules/databases/complaints.json', 'w') as complaints_file:
json.dump(complaints, complaints_file, indent=4)
return
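
Note: the return path changed from cv2.imencode to a PIL round trip. The detection image here is an RGB numpy array, and cv2.imencode assumes BGR channel order, so encoding through PIL avoids a red/blue swap in the annotated JPEG. A self-contained sketch of that helper:

    import base64
    import io

    import numpy as np
    from PIL import Image

    def rgb_array_to_base64_jpeg(arr: np.ndarray) -> str:
        # Same sequence as the new return path in process_img():
        # wrap the RGB array, encode to JPEG in memory, base64 the bytes.
        buffered = io.BytesIO()
        Image.fromarray(arr, "RGB").save(buffered, format="JPEG")
        return base64.b64encode(buffered.getvalue()).decode("ascii")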

+1 -134  server_side/api/modules/databases/crashes.json
(file diff suppressed because it is too large)


+138 -1  server_side/api/modules/databases/users.json
(file diff suppressed because it is too large)


+36 -0  server_side/api/modules/user_set.py

@@ -0,0 +1,36 @@
import os
import sys
import json
import base64
import face_recognition
from modules import utils
with open('modules/databases/users.json') as f:
users = json.load(f)
for file in os.listdir("images"):
if file.endswith(".png") or file.endswith(".jpg"):
uid = file.split('.')[0]
if len(uid) == 32 and utils.find_by_id(users.values(), uid):
full_path = os.path.join("images", file)
image = face_recognition.load_image_file(full_path)
with open(full_path, 'rb') as f:
base64_image = base64.b64encode(f.read())
if sys.platform == "win32":
face_locations = face_recognition.face_locations(image, model="cnn")[0]
else:
face_locations = face_recognition.face_locations(image)[0]
face_encoding = face_recognition.face_encodings(image)[0]
for k in users.keys():
if users[k]['id'] == uid:
users[k]['image'] = base64_image.decode()
users[k]['face_locations'] = face_locations
users[k]['face_encoding'] = list(face_encoding)
with open('modules/databases/users.json', 'w') as f:
users = json.dump(users, f, indent=4)
os.remove(full_path)
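
Note: user_set.py runs once at import time (app.py imports it above) and enriches each matching user record with a base64 image, face locations, and a 128-dimensional face encoding; those encodings are what find_name() in car_crash.py matches against. One hazard worth flagging: json.dump returns None, so the users = json.dump(...) assignment rebinds users and would break any later iteration over it. A short sketch of the lookup side, assuming users.json has already been populated:

    import json

    import face_recognition
    import numpy as np

    with open("modules/databases/users.json") as f:
        users = json.load(f)

    known_encodings = [np.array(u["face_encoding"]) for u in users.values() if "face_encoding" in u]
    known_names = [u["realname"] for u in users.values() if "face_encoding" in u]

    image = face_recognition.load_image_file("unknown_person.jpg")  # any test photo
    encodings = face_recognition.face_encodings(image)
    if encodings:
        distances = face_recognition.face_distance(known_encodings, encodings[0])
        best = int(np.argmin(distances))
        matches = face_recognition.compare_faces(known_encodings, encodings[0])
        print(known_names[best] if matches[best] else "Unknown")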
