
Merge remote-tracking branch 'origin/yigit' into efe

yigit
Efe Aydın committed 6 years ago, commit 97102d77c1
17 changed files with 319 additions and 1326 deletions
1. client_side/interface/UserData/complaints.html (+1, -1)
2. client_side/interface/UserData/crashes.html (+79, -52)
3. server_side/api/modules/SpotSelector.py (+1, -1)
4. server_side/api/modules/car_crash.py (+132, -140)
5. server_side/api/modules/complaint.py (+2, -2)
6. server_side/api/modules/databases/complaints.json (+16, -15)
7. server_side/api/modules/databases/crashes.json (+43, -996)
8. server_side/api/modules/denunciation.py (+1, -1)
9. server_side/api/modules/navigation.py (+3, -3)
10. server_side/api/modules/rating_system.py (+2, -2)
11. server_side/api/modules/smart_park.py (+1, -1)
12. server_side/api/modules/user_info.py (+2, -2)
13. server_side/api/modules/user_set.py (+1, -1)
14. server_side/api/modules/voting_system.py (+2, -2)
15. traffic_analyzer/ambulance_detect.py (+29, -103)
16. traffic_analyzer/images/train_image_taker.py (+2, -2)
17. traffic_analyzer/object_detection/data/mscoco_label_map.pbtxt (+2, -2)

client_side/interface/UserData/complaints.html (+1, -1)

@@ -206,7 +206,7 @@
document.getElementById("trust").innerHTML = users[id]["trustability"];
document.getElementById("phone").innerHTML = users[id]["tel"];
document.getElementById("email").innerHTML = users[id]["email"];
-document.getElementById("img-user").setAttribute("src","https://" + document.domain + ":5000" + users[id]["avatar"])
+document.getElementById("img-user").setAttribute("src","data:image/png;base64," + users[id]["image"])
}
}
function sendUpdate(form) {


client_side/interface/UserData/crashes.html (+79, -52)

@@ -21,21 +21,6 @@
<style>
#img:hover{
color: #424242;
--webkit-transition: all .3s ease-in;
--moz-transition: all .3s ease-in;
--ms-transition: all .3s ease-in;
--o-transition: all .3s ease-in;
-transition: all .3s ease-in;
-opacity: 1;
-transform: scale(1.15);
--ms-transform: scale(1.15); /* IE 9 */
--webkit-transform: scale(1.15); /* Safari and Chrome */
max-width: 400px;
max-height: 400px;
}
#img-user{
max-width: 150px;
max-height: 150px;
@@ -69,7 +54,6 @@
#content, #user-content{
display: inline-block;
margin-top: 125px;
background: white;
}
#submit{
@@ -95,18 +79,16 @@
<div id = "popup" align = "center" class = "popup">
<div id = "content">
<form action="." id="submission_form">
-<table border="0" cellpadding="10" cellspacing="0" id = "table" class = "modal">
+<canvas id="canvas" width="400" height="400" style="margin-top: 30px;">
+</canvas>
+<table border="0" cellpadding="10" cellspacing="0" id = "table" class = "modal">
<tr>
<td rowspan="2" colspan="7"><img src="cityscape.png" alt="img"id = "img"></td>
-<td colspan="2" class="title"><strong>Priority: </strong></td>
-<td colspan="1" id = "priority">7</td>
+<td colspan="4" class="title"><strong>Priority: </strong></td>
+<td colspan="6" id = "priority">7</td>
</tr>
<tr>
-<td colspan="2" class="title"><strong>Type: </strong></td>
-<td colspan="1" id = "type"> Ambulance</td>
+<td colspan="4" class="title"><strong>Type: </strong></td>
+<td colspan="6" id = "type"> Ambulance</td>
</tr>
<tr>
<td colspan="4" class="title"><strong>Submitter: </strong></td>
@@ -127,18 +109,9 @@
<td colspan="4" class="title"><strong>Tags: </strong></td>
<td colspan="6" id = "tags">Test tags</td>
</tr>
-<tr>
-<td colspan="4" class="title"><strong>Message: </strong></td>
-<td colspan="6" id = "input"> <textarea name="message" rows="5" cols="30" id="response">Please enter response.</textarea>
-</td>
-</tr>
-<tr>
-<td colspan="10">
-<input type="submit" id = "submit">
-</td>
-</tr>
</table>
</form>
</div>
@@ -206,7 +179,7 @@
document.getElementById("trust").innerHTML = users[id]["trustability"];
document.getElementById("phone").innerHTML = users[id]["tel"];
document.getElementById("email").innerHTML = users[id]["email"];
-document.getElementById("img-user").setAttribute("src","https://" + document.domain + ":5000" + users[id]["avatar"])
+document.getElementById("img-user").setAttribute("src","data:image/png;base64," + users[id]["image"])
}
}
</script>
@@ -232,6 +205,7 @@
</script>
<script>
+var class_color_dict = {"person":"blue","car":"yellow"}
var sound = document.getElementById("audio");
var markers = [];
@@ -254,32 +228,22 @@
var data = $.parseJSON(dataStr);
console.log(data);
for(var key in data) {
-console.log("aaa aaa");
data[key].forEach(function (element,index) {
-console.log("aaa bbb");
if(element["location"]["longitude"] === undefined || element["location"]["latitude"] === undefined){
return;
}
-console.log("aaa ccc");
-LatLng = {lat: parseFloat(element["location"]["latitude"]), lng: parseFloat(element["location"]["longitude"])};
-console.log("aaa ddd");
var marker = new google.maps.Marker({
position: {lat: parseFloat(element["location"]["latitude"]), lng: parseFloat(element["location"]["longitude"])},
map: map,
label: element["priority"].toString(),
title: element["message"]
});
-console.log("aaa eee");
marker.addListener('click', function () {
document.getElementById('popup').style.display = 'block';
-document.getElementById("priority").innerHTML = element["response"]["priority"];
-if(element["response"]["status"]){
-document.getElementById("response").innerHTML = element["response"]["message"];
-}
document.getElementById("submission_form").setAttribute("index",index);
document.getElementById("submission_form").setAttribute("id",key);
-document.getElementById("type").innerHTML = "Road Damage";
-document.getElementById("message").innerHTML = element["content"];
+document.getElementById("priority").innerHTML = element["priority"];
+document.getElementById("type").innerHTML = "Car Crash";
+document.getElementById("message").innerHTML = element["message"];
if (!(key in users)) {
$.get("https://" + document.domain + ":5000/users/" + key, function (dataStr, status) {
@@ -291,7 +255,57 @@
document.getElementById("user").innerHTML = users[key]["realname"];
}
document.getElementById("user").setAttribute("userid", key);
document.getElementById("img").setAttribute("src", "data:image/png;base64," + element["img"])
+var canvas = document.getElementById("canvas");
+var ctx = canvas.getContext("2d");
+var image = new Image();
+var rects = []
+image.onload = function() {
+if(image.height > image.width){
+XY_RATIO = canvas.height/image.height
+canvas.width = image.width * XY_RATIO
+}else{
+XY_RATIO = canvas.width/image.width
+canvas.height = image.height * XY_RATIO
+}
+ctx.drawImage(image, 0, 0, image.width, image.height, // source rectangle
+0, 0, canvas.width, canvas.height);
+for(i=0;i<element["output_dict"]["detection_classes"].length;i++){
+var box = element["output_dict"]["detection_boxes"][i];
+ctx.beginPath();
+ctx.lineWidth = "2";
+var box_class = element["output_dict"]["detection_classes"][i];
+ctx.strokeStyle = class_color_dict[box_class];
+ctx.rect(box[0]*XY_RATIO,box[2]*XY_RATIO,Math.abs(box[1]-box[0])*XY_RATIO,Math.abs(box[3]-box[2])*XY_RATIO);
+ctx.stroke()
+}
+for(i=0;i<element["people"].length;i++){
+ctx.beginPath();
+var person = element["people"][i]
+box = person[1]
+ctx.strokeStyle = "green";
+if(person[0]){
+ctx.strokeStyle = "red";
+}
+ctx.rect(box[3]*XY_RATIO,box[0]*XY_RATIO,(box[1]-box[3])*XY_RATIO,(box[0]-box[2])*XY_RATIO);
+ctx.stroke();
+rects.push({"x":box[3]*XY_RATIO,"y":box[0]*XY_RATIO,"w":(box[1]-box[3])*XY_RATIO,"h":(box[0]-box[2])*XY_RATIO,"id":person[2]})
+}
+};
+image.src = "data:image/png;base64," + element["img"]
+canvas.addEventListener('click', function(e) {
+console.log('click: ' + e.offsetX + '/' + e.offsetY);
+console.log(rects)
+var rect = collides(rects, e.offsetX, e.offsetY);
+if (rect) {
+document.getElementById('popup').style.display='none';
+document.getElementById('user-popup').style.display='block';
+setUser(rect.id);
+}
+}, false);
});
});
@@ -303,7 +317,20 @@
}
denunciation_count = markers.length;
});
+function collides(rects, x, y) {
+var isCollision = false;
+for (var i = 0, len = rects.length; i < len; i++) {
+var left = rects[i].x, right = rects[i].x+rects[i].w;
+var top = rects[i].y, bottom = rects[i].y+rects[i].h;
+if (right >= x
+&& left <= x
+&& bottom >= y
+&& top <= y) {
+isCollision = rects[i];
+}
+}
+return isCollision;
+}
</script>
<script src="https://maps.googleapis.com/maps/api/js?key=AIzaSyBuOC03IHPA_6TPnfk18b0SAgD1uge4-dk&callback=initMap" async defer></script>


server_side/api/modules/SpotSelector.py (+1, -1)

@@ -48,7 +48,7 @@ for i in range(len(rects)):
}
with open("databases/locations.json","w") as f:
-f.write(json.dumps(locs,indent=4))
+f.write(json.dumps(locs,indent=2))
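Reviewer note: this merge switches every json.dump/json.dumps call in the repo from indent=4 to indent=2. A minimal sketch of the effect, with toy data standing in for the real locations.json:

import json

locs = {"spot_1": {"x": 10, "y": 20}}  # toy stand-in for the spot data

# indent=2 emits two spaces per nesting level instead of four, so the
# JSON databases stay human-readable but every nested line gets shorter.
print(json.dumps(locs, indent=4))
print(json.dumps(locs, indent=2))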


server_side/api/modules/car_crash.py (+132, -140)

@@ -20,6 +20,8 @@ import ssl
from object_detection.utils import label_map_util
import face_recognition
+AI_IP = '10.10.26.161'
VEHICLE_CLASSES = [3, 6, 8]
MIN_AREA_RATIO = 0.9
import numpy as np
@@ -27,7 +29,7 @@ import numpy as np
MIN_SCORE_THRESH = 0.6
if sys.platform == "win32":
-sys.path.insert(0, r'C:\Users\Tednokent01\Downloads\MyCity\traffic_analyzer')
+sys.path.insert(0, r'C:\Users\Tednokent01\Downloads\MyCity\traffic_analyzer')
PATH_TO_LABELS = os.path.join('object_detection/data', 'mscoco_label_map.pbtxt')
@@ -39,38 +41,38 @@ api = Api(app)
db_path = os.path.join(app.root_path, 'databases', 'crashes.json')
with open(db_path, 'r') as f:
-crashes = json.load(f)
+crashes = json.load(f)
users_path = os.path.join(app.root_path, 'databases', 'users.json')
with open(users_path, 'r') as f:
-users = json.load(f)
+users = json.load(f)
def load_image_into_numpy_array(image):
-(im_width, im_height) = image.size
-return np.array(image.getdata()).reshape(
-(im_height, im_width, 3)).astype(np.uint8)
+(im_width, im_height) = image.size
+return np.array(image.getdata()).reshape(
+(im_height, im_width, 3)).astype(np.uint8)
context = ssl._create_unverified_context()
def find_name(image):
-try:
-known_faces = []
-known_face_names = []
-for v in users.values():
-known_faces.append(np.array(v['face_encoding']))
-known_face_names.append(v['id'])
-face_encoding = face_recognition.face_encodings(image)[0]
-results = face_recognition.compare_faces(known_faces, face_encoding)
-name = "Unknown"
-face_distances = face_recognition.face_distance(known_faces, face_encoding)
-best_match_index = np.argmin(face_distances)
-if results[best_match_index]:
-name = known_face_names[best_match_index]
-return name
-except:
-return None
+try:
+known_faces = []
+known_face_names = []
+for v in users.values():
+known_faces.append(np.array(v['face_encoding']))
+known_face_names.append(v['id'])
+face_encoding = face_recognition.face_encodings(image)[0]
+results = face_recognition.compare_faces(known_faces, face_encoding)
+name = "Unknown"
+face_distances = face_recognition.face_distance(known_faces, face_encoding)
+best_match_index = np.argmin(face_distances)
+if results[best_match_index]:
+name = known_face_names[best_match_index]
+return name
+except:
+return None
def rotate_img(img, angle):
if angle == 90:
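Reviewer note: a usage sketch for find_name above, assuming users.json entries carry a face_encoding list as the module expects; probe.jpg is a hypothetical test photo:

import face_recognition

# find_name returns a matching user id, "Unknown" when no stored face
# matches, or None if no face can be encoded at all.
probe = face_recognition.load_image_file("probe.jpg")  # hypothetical input
print(find_name(probe))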
@@ -86,34 +88,33 @@ def process_img(img_base64):
output_dict = json.loads(json.loads(data))
image_np = cv2.cvtColor(load_image_into_numpy_array(Image.open(io.BytesIO(base64.b64decode(img_base64)))),cv2.COLOR_RGB2BGR)
-output_dict_processed = {"detection_classes":[], "detection_scores":[], "detection_boxes":[]}
-im_height, im_width, _ = image_np.shape
-cars_involved = 0
-injured_people = 0
-prev_cars = []
-boxes = []
-spam_boxes = []
-for index, i in enumerate(output_dict['detection_classes']):
-score = output_dict['detection_scores'][index]
-if score > MIN_SCORE_THRESH:
-if i in VEHICLE_CLASSES:
-box = output_dict['detection_boxes'][index]
-boxes.append(Box((box[1] * im_width, box[3] * im_width,
-box[0] * im_height, box[2] * im_height),
-i,index))
-box_combinations = itertools.combinations(boxes,r=2)
-for combination in box_combinations:
-big = combination[0].get_bigger(combination[1])
-if big and not big in spam_boxes:
-spam_boxes.append(big)
-for spam in spam_boxes:
-boxes.remove(spam)
-for box in boxes:
-output_dict_processed["detection_classes"].append(output_dict["detection_classes"][box.index])
-output_dict_processed["detection_scores"].append(output_dict["detection_scores"][box.index])
-output_dict_processed["detection_boxes"].append(output_dict["detection_boxes"][box.index])
+output_dict_processed = {"detection_classes":[], "detection_scores":[], "detection_boxes":[]}
+im_height, im_width, _ = image_np.shape
+cars_involved = 0
+injured_people = 0
+prev_cars = []
+boxes = []
+spam_boxes = []
+for index, i in enumerate(output_dict['detection_classes']):
+score = output_dict['detection_scores'][index]
+if score > MIN_SCORE_THRESH:
+box = output_dict['detection_boxes'][index]
+boxes.append(Box((box[1] * im_width, box[3] * im_width,
+box[0] * im_height, box[2] * im_height),
+i,index))
+box_combinations = itertools.combinations(boxes,r=2)
+for combination in box_combinations:
+big = combination[0].get_bigger(combination[1])
+if big and not big in spam_boxes:
+spam_boxes.append(big)
+for spam in spam_boxes:
+boxes.remove(spam)
+for box in boxes:
+output_dict_processed["detection_classes"].append(output_dict["detection_classes"][box.index])
+output_dict_processed["detection_scores"].append(output_dict["detection_scores"][box.index])
+output_dict_processed["detection_boxes"].append(output_dict["detection_boxes"][box.index])
people = {}
for index, i in enumerate(output_dict['detection_classes']):
@@ -169,32 +170,22 @@ def process_img(img_base64):
_, buffer = cv2.imencode('.jpg', image_np)
# image_process = image_np[:]
# vis_util.visualize_boxes_and_labels_on_image_array(
# image_process,
# output_dict_processed["detection_boxes"],
# output_dict_processed["detection_classes"],
# output_dict_processed["detection_scores"],
# category_index,
# min_score_thresh=MIN_SCORE_THRESH,
# use_normalized_coordinates=True,
# line_thickness=8)
# cv2.imshow("a",image_process)
# cv2.waitKey(0)
_, buffer = cv2.imencode('.jpg', image_np)
for i in range(len(output_dict_processed["detection_classes"])):
box = output_dict_processed["detection_boxes"][i]
output_dict_processed["detection_boxes"][i] = [box[1] * im_width, box[3] * im_width, box[0] * im_height, box[2] * im_height]
for i in range(len(output_dict_processed["detection_classes"])):
-output_dict_processed["detection_classes"][i] = category_index[output_dict_processed["detection_classes"][i]]["name"]
+output_dict_processed["detection_classes"][i] = category_index[output_dict_processed["detection_classes"][i]]["name"]
-return base64.b64encode(buffer).decode('ascii'), cars_involved, injured_people,output_dict_processed,people
+return base64.b64encode(buffer).decode('ascii'), cars_involved, injured_people,output_dict_processed,people
class Crash(Resource):
-def post(self):
-message = request.form['message']
-base64_img = request.form['img']
-id = request.form['id']
-lat, long = request.form['lat'], request.form['long']
+def post(self):
+message = request.form['message']
+base64_img = request.form['img']
+id = request.form['id']
+lat, long = request.form['lat'], request.form['long']
image, car_count, injured,out,people = process_img(base64_img)
(top, right, bottom, left) = people[0][1]
@@ -212,78 +203,79 @@ class Crash(Resource):
if priority > 10:
priority = 10
-crash = {
-'img': image,
-'message': message,
-'priority': priority,
-'stats': {
-'cars': car_count,
-'injured': injured
-},
-'location': {
-'latitude': lat,
-'longitude': long
-},
-"output_dict": out
-}
-if id in crashes:
-crashes[id].append(crash)
-else:
-crashes[id] = [crash]
-with open(db_path, 'w') as f:
-json.dump(crashes, f, indent=4)
-return crash
+crash = {
+'img': image,
+'message': message,
+'priority': priority,
+'stats': {
+'cars': car_count,
+'injured': injured
+},
+'location': {
+'latitude': lat,
+'longitude': long
+},
+"output_dict": out,
+"people":people
+}
+if id in crashes:
+crashes[id].append(crash)
+else:
+crashes[id] = [crash]
+with open(db_path, 'w') as f:
+json.dump(crashes, f, indent=2)
+return crash
class Crashes(Resource):
-def post(self):
-process_dict = copy.deepcopy(crashes)
-return_dict = {}
-for id in process_dict:
-for i in range(len(process_dict[id])):
-del process_dict[id][i]["img"]
-for id in process_dict:
-for i in range(len(process_dict[id])):
-location = process_dict[id][i]['location']
-lat, lng = float(request.form['lat']), float(request.form['lng'])
-if abs(float(location['latitude']) - lat) < 0.3 and abs(float(location['longitude']) - lng) < 0.3:
-if id in return_dict:
-return_dict[id].append(process_dict[id][i])
-else:
-return_dict[id] = [process_dict[id][i]]
-return return_dict
+def post(self):
+process_dict = copy.deepcopy(crashes)
+return_dict = {}
+for id in process_dict:
+for i in range(len(process_dict[id])):
+del process_dict[id][i]["img"]
+for id in process_dict:
+for i in range(len(process_dict[id])):
+location = process_dict[id][i]['location']
+lat, lng = float(request.form['lat']), float(request.form['lng'])
+if abs(float(location['latitude']) - lat) < 0.3 and abs(float(location['longitude']) - lng) < 0.3:
+if id in return_dict:
+return_dict[id].append(process_dict[id][i])
+else:
+return_dict[id] = [process_dict[id][i]]
+return return_dict
class Box:
-def __init__(self,coords, type,index):
-self.x1 = coords[0]
-self.y1 = coords[2]
-self.x2 = coords[1]
-self.y2 = coords[3]
-self.area = (self.x2-self.x1) * (self.y2-self.y1)
-self.type = type
-self.index = index
-def get_bigger(self,box):
-if box.type != self.type:
-return None
-left = max(box.x1, self.x1)
-right = min(box.x2, self.x2)
-bottom = max(box.y2, self.y2)
-top = min(box.y1, self.y1)
-if not left < right and bottom < top:
-return None
-area_temp = abs((right-left)*(top-bottom))
-if abs((right-left)*(top-bottom))/((box.area * (box.area < self.area)) + (self.area * (box.area > self.area))) < MIN_AREA_RATIO:
-return None
-if box.area > self.area:
-return box
-else:
-return self
+def __init__(self,coords, type,index):
+self.x1 = coords[0]
+self.y1 = coords[2]
+self.x2 = coords[1]
+self.y2 = coords[3]
+self.area = (self.x2-self.x1) * (self.y2-self.y1)
+self.type = type
+self.index = index
+def get_bigger(self,box):
+if box.type != self.type:
+return None
+left = max(box.x1, self.x1)
+right = min(box.x2, self.x2)
+bottom = max(box.y2, self.y2)
+top = min(box.y1, self.y1)
+if not left < right and bottom < top:
+return None
+area_temp = abs((right-left)*(top-bottom))
+if abs((right-left)*(top-bottom))/((box.area * (box.area < self.area)) + (self.area * (box.area > self.area))) < MIN_AREA_RATIO:
+return None
+if box.area > self.area:
+return box
+else:
+return self
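Reviewer note: a condensed sketch of the dedup pass process_img runs with this class. For each pair of same-type boxes, get_bigger returns the larger one when their intersection covers at least MIN_AREA_RATIO (90%) of the smaller box, and that larger box is then discarded as a duplicate detection:

import itertools

def dedup_boxes(boxes):  # boxes: list of Box instances as defined above
    spam = []
    for a, b in itertools.combinations(boxes, r=2):
        big = a.get_bigger(b)  # None unless same type and heavy overlap
        if big and big not in spam:
            spam.append(big)
    return [box for box in boxes if box not in spam]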


server_side/api/modules/complaint.py (+2, -2)

@@ -124,7 +124,7 @@ class Complaint(Resource):
del complaints[complaint["id"]][-1]["id"]
with open('modules/databases/complaints.json', 'w') as complaints_file:
-json.dump(complaints, complaints_file, indent=4)
+json.dump(complaints, complaints_file, indent=2)
class Complaints(Resource):
@@ -139,5 +139,5 @@ class ComplaintsUpdate(Resource):
complaints[args.get("id")][int(args.get("index"))]["response"]["message"] = args.get("message")
complaints[args["id"]][int(args["index"])]["response"]["status"] = True
with open('modules/databases/complaints.json', 'w') as complaints_file:
-json.dump(complaints, complaints_file, indent=4)
+json.dump(complaints, complaints_file, indent=2)
return

server_side/api/modules/databases/complaints.json (+16, -15) (diff suppressed because the file is too large)


server_side/api/modules/databases/crashes.json (+43, -996) (diff suppressed because the file is too large)


server_side/api/modules/denunciation.py (+1, -1)

@@ -48,7 +48,7 @@ class Alert(Resource):
denunciations.append(denunciation)
with open(db_path, 'w') as f:
-json.dump(denunciations, f, indent=4)
+json.dump(denunciations, f, indent=2)
return {'success': True}
else:


server_side/api/modules/navigation.py (+3, -3)

@@ -309,7 +309,7 @@ class Transit(Resource):
if change:
bus_data = open("modules/databases/bus.json", "w")
-bus_data.write(json.dumps(bus_json, indent=4, sort_keys=True))
+bus_data.write(json.dumps(bus_json, indent=2, sort_keys=True))
bus_data.close()
for route in travel["routes"]:
@@ -333,7 +333,7 @@ class Transit(Resource):
if change:
bus_data = open("modules/databases/bus.json", "w")
-bus_data.write(json.dumps(bus_json, indent=4, sort_keys=True))
+bus_data.write(json.dumps(bus_json, indent=2, sort_keys=True))
bus_data.close()
for i in range(len(shortest["routes"])):
@@ -361,7 +361,7 @@ class Transit(Resource):
if change:
bus_data = open("modules/databases/bus.json", "w")
-bus_data.write(json.dumps(bus_json, indent=4, sort_keys=True))
+bus_data.write(json.dumps(bus_json, indent=2, sort_keys=True))
bus_data.close()
return shortest


server_side/api/modules/rating_system.py (+2, -2)

@@ -80,7 +80,7 @@ class Ratings(Resource):
# ratings.append(rating)
#
# with open(db_path, 'w') as f:
-# json.dump(ratings, f, indent=4)
+# json.dump(ratings, f, indent=2)
#
# return rating
@@ -119,7 +119,7 @@ class Rate(Resource):
'note': note
}
with open(db_path, 'w') as f:
-json.dump(ratings, f, indent=4)
+json.dump(ratings, f, indent=2)
return {'message': 'Success'}


server_side/api/modules/smart_park.py (+1, -1)

@@ -116,7 +116,7 @@ while 0:
data = generateAvg(locs,im,data)
with open("modules/databases/park_data.json","w") as f:
-f.write(json.dumps(data,indent=4))
+f.write(json.dumps(data,indent=2))
exit(0)
class Empty(Resource):


server_side/api/modules/user_info.py (+2, -2)

@@ -43,7 +43,7 @@ class Users(Resource):
users.append(user)
with open(db_path, 'w') as f:
-json.dump(users, f, indent=4)
+json.dump(users, f, indent=2)
return user
@@ -95,7 +95,7 @@ class ReducePoints(Resource):
users[username]['points'] -= int(request.form['reduce'])
with open(db_path, 'w') as f:
-json.dump(users, f, indent=4)
+json.dump(users, f, indent=2)
else:
abort(404, error="User {} doesn't exist".format(user_id))


server_side/api/modules/user_set.py (+1, -1)

@@ -31,6 +31,6 @@ for file in os.listdir("images"):
users[k]['face_encoding'] = list(face_encoding)
with open('modules/databases/users.json', 'w') as f:
-users = json.dump(users, f, indent=4)
+users = json.dump(users, f, indent=2)
os.remove(full_path)

server_side/api/modules/voting_system.py (+2, -2)

@@ -73,7 +73,7 @@ class Votings(Resource):
votings.append(voting)
with open(db_path, 'w') as f:
-json.dump(votings, f, indent=4)
+json.dump(votings, f, indent=2)
return {'message': 'Success'}
@@ -105,7 +105,7 @@ class Vote(Resource):
votings[voting_id]['votes'][str(vote_id)]['votes'] += 1
votings[voting_id]['voters'].append(voter_id)
with open(db_path, 'w') as f:
-json.dump(votings, f, indent=4)
+json.dump(votings, f, indent=2)
return {'message': 'Success'}


traffic_analyzer/ambulance_detect.py (+29, -103)

@@ -25,6 +25,9 @@ import json
import base64
from PIL import Image
from io import BytesIO
+from urllib.parse import urlencode
+from urllib.request import Request, urlopen
+import ssl
switch = 1
@@ -35,6 +38,9 @@ sys.path.append("..")
import time
from object_detection.utils import ops as utils_ops
+AI_IP = '10.10.26.161'
+context = ssl._create_unverified_context()
# What model to download.
@@ -42,47 +48,17 @@ from object_detection.utils import ops as utils_ops
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
#MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17' #not even worth trying
#MODEL_NAME = "ssd_inception_v2_coco_2017_11_17" # not bad and fast
MODEL_NAME = "rfcn_resnet101_coco_2018_01_28" # WORKS BEST BUT takes 4 times longer per image
#MODEL_NAME = "faster_rcnn_resnet101_coco_11_06_2017" # too slow
#MODEL_NAME = "ssd_resnet101_v1_fpn_shared_box_predictor_oid_512x512_sync_2019_01_20"
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('object_detection/data', 'mscoco_label_map.pbtxt')
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
-if sys.platform == "win32":
-detection_graph = tf.Graph()
-with detection_graph.as_default():
-od_graph_def = tf.GraphDef()
-with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
-serialized_graph = fid.read()
-od_graph_def.ParseFromString(serialized_graph)
-tf.import_graph_def(od_graph_def, name='')
-def load_image_into_numpy_array(image):
-(im_width, im_height) = image.size
-return np.array(image.getdata()).reshape(
-(im_height, im_width, 3)).astype(np.uint8)
-# For the sake of simplicity we will use only 2 images:
-# image1.jpg
-# image2.jpg
-# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
-PATH_TO_TEST_IMAGES_DIR = 'object_detection/test_images'
-TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(3, 6) ]
-# Size, in inches, of the output images.
-sess = 0
+def load_image_into_numpy_array(image):
+(im_width, im_height) = image.size
+return np.array(image.getdata()).reshape(
+(im_height, im_width, 3)).astype(np.uint8)
data = {"gpu_temp":"10C","gpu_load":"15%","cpu_temp":"47C","cpu_load":"15%","mem_temp":"NaN","mem_load":"17%","fan_speed":"10000RPM"}
@@ -96,54 +72,15 @@ def get_temps():
data["fan_speed"] = str(psutil.sensors_fans()["dell_smm"][0][1])+"RPM"
-def run_inference_for_single_image(image, graph):
-global switch
-global sess
-with graph.as_default():
-if(switch):
-sess = tf.Session()
-switch = 0
-# Get handles to input and output tensors
-ops = tf.get_default_graph().get_operations()
-all_tensor_names = {output.name for op in ops for output in op.outputs}
-tensor_dict = {}
-for key in [
-'num_detections', 'detection_boxes', 'detection_scores',
-'detection_classes', 'detection_masks'
-]:
-tensor_name = key + ':0'
-if tensor_name in all_tensor_names:
-tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
-tensor_name)
-if 'detection_masks' in tensor_dict:
-# The following processing is only for single image
-detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
-detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
-# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
-real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
-detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
-detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
-detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
-detection_masks, detection_boxes, image.shape[1], image.shape[2])
-detection_masks_reframed = tf.cast(
-tf.greater(detection_masks_reframed, 0.5), tf.uint8)
-# Follow the convention by adding back the batch dimension
-tensor_dict['detection_masks'] = tf.expand_dims(
-detection_masks_reframed, 0)
-image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
-# Run inference
-output_dict = sess.run(tensor_dict,
-feed_dict={image_tensor: image})
-# all outputs are float32 numpy arrays, so convert types as appropriate
-output_dict['num_detections'] = int(output_dict['num_detections'][0])
-output_dict['detection_classes'] = output_dict[
-'detection_classes'][0].astype(np.int64)
-output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
-output_dict['detection_scores'] = output_dict['detection_scores'][0]
-if 'detection_masks' in output_dict:
-output_dict['detection_masks'] = output_dict['detection_masks'][0]
-return output_dict
+def run_inference_for_single_image(image):
+_, buffer = cv2.imencode('.jpg', image)
+img_base64 = base64.b64encode(buffer).decode('ascii')
+url = 'https://%s:5001/ai' % AI_IP # Set destination URL here
+post_fields = {'img': img_base64,"type":"coco"} # Set POST fields here
+request = Request(url, urlencode(post_fields).encode())
+data = urlopen(request, context=context).read().decode("ascii")
+output_dict = json.loads(json.loads(data))
+return output_dict
kill = True
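Reviewer note: with the local TensorFlow session gone, run_inference_for_single_image is now a thin client for the AI server. A usage sketch under the diff's own assumptions (server reachable at AI_IP:5001, double-encoded JSON response):

import cv2

cam = cv2.VideoCapture(0)  # the merged main loop uses device 2
ret, frame = cam.read()
if ret:
    out = run_inference_for_single_image(frame)
    # Same parallel-list shape the old in-process TensorFlow path produced:
    print(out['detection_classes'][:5], out['detection_scores'][:5])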
@@ -164,14 +101,9 @@ socket_switch = True
thread = threading.Thread(target=listener)
thread.start()
-if sys.platform == "win32":
-with detection_graph.as_default():
-sess = tf.Session()
-cam = cv2.VideoCapture(0)
-else:
-cam = cv2.VideoCapture('debug_data/amb_1.mp4')
-with open("debug_data/frame_data.pkl","rb") as pkl_file:
-frame_data = pickle.load(pkl_file)
+cam = cv2.VideoCapture(2)
switch = 0
get_temps()
@@ -183,28 +115,22 @@ reps_vid = 0
while 1:
ret,image = cam.read()
reps_vid += 1
-if not sys.platform == "win32" and not reps_vid % 2 == 0:
-continue
reps += 1
try: # Kavşak
t1 = time.time()
image_np = image
image_np_expanded = np.expand_dims(image_np, axis=0)
-if sys.platform == "win32":
-output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)
-else:
-output_dict = frame_data[reps]
+output_dict = run_inference_for_single_image(image_np)
height, width, channels = image_np.shape
out_dict = {'detection_boxes': [], 'detection_classes': [], 'detection_scores': []}
-for i in output_dict['detection_classes']:
+for index,i in enumerate(output_dict['detection_classes']):
cont = False
-if i in [3, 6, 8]: # Car, bus, truck
-index = np.where(output_dict['detection_classes'] == i)[0][0]
+if i in [3, 6, 8,44,77]: # Car, bus, truck
score = output_dict['detection_scores'][index]
if score > 0.3:
-if not any((output_dict['detection_boxes'][index] == b).all() for b in out_dict['detection_boxes']):
+if not any((output_dict['detection_boxes'][index] == b) for b in out_dict['detection_boxes']):
avg_x = (output_dict['detection_boxes'][index][0] + output_dict['detection_boxes'][index][2])/2
avg_y = (output_dict['detection_boxes'][index][1] + output_dict['detection_boxes'][index][3])/2
for box in out_dict['detection_boxes']:
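Reviewer note: the rewritten loop filters the remote detections by class id and score before the overlap bookkeeping. A minimal standalone sketch of that pass, using the relabeled ambulance ids from the label-map change below:

KEEP_CLASSES = {3, 6, 8, 44, 77}  # car, bus, truck + relabeled ambulance ids
MIN_SCORE = 0.3

def filter_detections(output_dict):
    out = {'detection_boxes': [], 'detection_classes': [], 'detection_scores': []}
    for index, cls in enumerate(output_dict['detection_classes']):
        box = output_dict['detection_boxes'][index]
        score = output_dict['detection_scores'][index]
        # Keep confident hits, skipping exact duplicate boxes.
        if cls in KEEP_CLASSES and score > MIN_SCORE and box not in out['detection_boxes']:
            out['detection_boxes'].append(box)
            out['detection_classes'].append(cls)
            out['detection_scores'].append(score)
    return out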


traffic_analyzer/images/train_image_taker.py (+2, -2)

@@ -87,9 +87,9 @@ def cut_image():
return cut_rects
coordinates = cut_image()
-print(json.dumps(coordinates,indent=4))
+print(json.dumps(coordinates,indent=2))
with open("coordinates.json","w") as file:
-file.write(json.dumps(coordinates,indent=4))
+file.write(json.dumps(coordinates,indent=2))

traffic_analyzer/object_detection/data/mscoco_label_map.pbtxt (+2, -2)

@@ -196,7 +196,7 @@ item {
item {
name: "/m/04dr76w"
id: 44
-display_name: "bottle"
+display_name: "Ambulance"
}
item {
name: "/m/09tvcd"
@@ -336,7 +336,7 @@ item {
item {
name: "/m/050k8"
id: 77
-display_name: "cell phone"
+display_name: "ambulance"
}
item {
name: "/m/0fx9l"
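Reviewer note: these two display_name edits repurpose COCO ids 44 ("bottle") and 77 ("cell phone") as ambulance labels, so the stock COCO detector can flag ambulances without retraining. After the change, the category index loaded in car_crash.py resolves those ids accordingly:

from object_detection.utils import label_map_util

# Ids 44 and 77 now read back as the relabeled ambulance classes.
category_index = label_map_util.create_category_index_from_labelmap(
    'object_detection/data/mscoco_label_map.pbtxt', use_display_name=True)
print(category_index[44]['name'], category_index[77]['name'])  # Ambulance ambulance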

