louiscklaw
2025-01-31 19:15:17 +08:00
parent 09adae8c8e
commit 6c60a73f30
1546 changed files with 286918 additions and 0 deletions

View File

@@ -0,0 +1,10 @@
# AI-BlurFaceInVideo

A small Flask web app that detects the faces in an uploaded video and lets you choose which of them to blur.

## Installation

Use the package manager pip to install opencv-python, face_recognition, numpy and Flask.
```bash
pip install opencv-python
pip install face_recognition
pip install numpy
pip install Flask
```
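
## Usage

A minimal sketch of how to run the app locally, assuming the Flask entry point is `app.py` and that the folders the code reads from and writes to (`upload-video/` and `static/save-data/`) sit next to it:

```bash
# create the folders the app reads from and writes to
mkdir -p upload-video static/save-data

# start the Flask development server, then open http://127.0.0.1:5000
python app.py
```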

View File

@@ -0,0 +1,212 @@
from flask import Flask
from flask import render_template
from flask import request, redirect
from werkzeug.utils import secure_filename
import time
import os

import cv2
import face_recognition
import numpy as np

app = Flask(__name__)
app.config["IMAGE_UPLOADS"] = './upload-video/'


@app.route('/index')
@app.route('/')
def index():
    return render_template("index.html")


@app.route('/upload', methods=["GET", "POST"])
def uploadVideo():
    if request.method == 'POST':
        if request.files:
            video = request.files["video"]
            timestr = time.strftime("%Y%m%d-%H%M%S")
            filename = secure_filename(timestr) + ".mp4"
            video.save(os.path.join(app.config["IMAGE_UPLOADS"], filename))
            return redirect('/upload/ml?file=' + timestr)
        else:
            messages = "Missing the file"
            return render_template("index.html", messages=messages)
    else:
        return render_template("index.html")


@app.route('/upload/ml', methods=["GET", "POST"])
def process():
    fileName = request.args.get('file')
    cap = cv2.VideoCapture('./upload-video/' + fileName + '.mp4')

    fps = 20  # output frame rate; the code assumes the source is roughly 20 fps
    timestr = time.strftime("%Y%m%d-%H%M%S")
    detected_face = []      # encodings of every distinct face seen so far
    detected_face_img = []  # thumbnails shown on the face-selection page
    save_image_count = 0

    # NOTE: the 'x264' FourCC needs an OpenCV/FFmpeg build with H.264 support,
    # and the writer is opened for 1280x720 frames (the code assumes this input size)
    fourcc = cv2.VideoWriter_fourcc(*'x264')
    output_movie = cv2.VideoWriter('./static/save-data/' + timestr + '.mp4', fourcc, fps, (1280, 720))

    while True:
        # Grab a single frame of video
        ret, frame = cap.read()
        appendData = False
        if not ret:
            break

        # face_recognition expects RGB, OpenCV delivers BGR
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        face_locations = face_recognition.face_locations(rgb_frame)
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

        if len(detected_face) == 0:
            # first frame with faces: treat every face as new
            for face_encoding in face_encodings:
                detected_face.append(face_encoding)
                appendData = True
        else:
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(detected_face, face_encoding, tolerance=0.5)
                if not any(matches):
                    # only keep faces that are clearly different from the known ones
                    face_distances = face_recognition.face_distance(detected_face, face_encoding)
                    for face_distance in face_distances:
                        if face_distance > 0.65:
                            appendData = True
                            detected_face.append(face_encoding)

        for top, right, bottom, left in face_locations:
            if appendData:
                # save a thumbnail of the newly detected face for the selection page
                crop_img = frame[top:bottom, left:right]
                save_image_count = save_image_count + 1
                cv2.imwrite("./static/save-data/" + timestr + '-' + str(save_image_count) + ".png", crop_img)
                detected_face_img.append({"id": save_image_count,
                                          "url": "/static/save-data/" + timestr + '-' + str(save_image_count) + ".png"})
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

        output_movie.write(frame)

    cap.release()
    output_movie.release()
    return render_template("process.html", detected_face_img=detected_face_img, fileName=timestr)


@app.route('/upload/ml/render', methods=["POST"])
def render():
    fileName = request.form.get('file')
    selectedFormFaces = request.form.getlist('face')

    cap = cv2.VideoCapture('./upload-video/' + fileName + '.mp4')
    fps = 20  # output frame rate; the code assumes the source is roughly 20 fps

    selectedFaces = []        # 0-based indices of the faces the user chose to blur
    return_selected_img = []  # thumbnails of the selected faces for the result page
    detected_face = []        # distinct faces, rebuilt in the same order as process()
    for face in selectedFormFaces:
        selectedFaces.append(int(face) - 1)
        return_selected_img.append("/static/save-data/" + fileName + '-' + face + ".png")
    print('selected faces: ' + str(selectedFaces))

    fourcc = cv2.VideoWriter_fourcc(*'x264')
    output_movie = cv2.VideoWriter('./static/save-data/' + fileName + '-finish.mp4', fourcc, fps, (1280, 720))

    while True:
        # Grab a single frame of video
        ret, frame = cap.read()
        match_check = False
        if not ret:
            break

        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        face_locations = face_recognition.face_locations(rgb_frame)
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

        # rebuild the list of distinct faces with the same rules as process(),
        # so the thumbnail ids the user picked line up with detected_face
        if len(detected_face) == 0:
            for face_encoding in face_encodings:
                detected_face.append(face_encoding)
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(detected_face, face_encoding, tolerance=0.5)
            if not any(matches):
                face_distances = face_recognition.face_distance(detected_face, face_encoding)
                for face_distance in face_distances:
                    if face_distance > 0.65:
                        detected_face.append(face_encoding)

        # find which faces in this frame match one of the selected faces
        match_list = []
        for known_index, known_face in enumerate(detected_face):
            if known_index not in selectedFaces:
                continue
            matches = face_recognition.compare_faces(face_encodings, known_face, tolerance=0.5)
            for frame_face_index, match in enumerate(matches):
                if match:
                    match_check = True
                    match_list.append(frame_face_index)

        if match_check:
            for location_count, (top, right, bottom, left) in enumerate(face_locations):
                if location_count in match_list:
                    # blur the selected face region directly on the output frame
                    roi = frame[top:bottom, left:right]
                    frame[top:bottom, left:right] = cv2.GaussianBlur(roi, (99, 99), 30)

        output_movie.write(frame)

    cap.release()
    output_movie.release()
    return render_template("render.html", detected_face_img=return_selected_img, fileName=fileName)


@app.route('/upload/test')
def test():
    detected_face_img = []
    detected_face_img.append({"id": 1, "url": "/static/save-data/20200507-032151-1.png"})
    detected_face_img.append({"id": 2, "url": "/static/save-data/20200507-032151-2.png"})
    detected_face_img.append({"id": 3, "url": "/static/save-data/20200507-032151-3.png"})
    return render_template("process.html", detected_face_img=detected_face_img, fileName='20200507-032151')


if __name__ == '__main__':
    app.run()

View File

@@ -0,0 +1,113 @@
/*
*
* ==========================================
* CUSTOM UTIL CLASSES
* ==========================================
*
*/
#upload {
opacity: 0;
}
#upload-label {
position: absolute;
top: 35%;
left: 1rem;
transform: translateY(-50%);
}
.image-area {
border: 2px dashed rgba(255, 255, 255, 0.7);
padding: 1rem;
position: relative;
}
.image-area::before {
content: 'Uploaded image result';
color: #fff;
font-weight: bold;
text-transform: uppercase;
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
font-size: 0.8rem;
z-index: 1;
}
.image-area img {
z-index: 2;
position: relative;
}
.btn-render{
margin-top: 80px;
}
/*
*
* ==========================================
* FOR DEMO PURPOSES
* ==========================================
*
*/
body {
min-height: 100vh;
background-color: #757f9a;
background-image: linear-gradient(147deg, #757f9a 0%, #d7dde8 100%);
}
input[type="checkbox"] {
display: none;
}
label {
display: block;
position: relative;
margin: 10px;
cursor: pointer;
}
label:before {
background-color: white;
color: white;
content: " ";
display: block;
border-radius: 50%;
border: 1px solid grey;
position: absolute;
top: -5px;
left: -5px;
width: 25px;
height: 25px;
text-align: center;
line-height: 28px;
transition-duration: 0.4s;
transform: scale(0);
z-index: 999;
}
label img {
height: 80px;
width: 80px;
transition-duration: 0.2s;
transform-origin: 50% 50%;
}
:checked + label {
border-color: #ddd;
}
:checked + label:before {
content: "✓";
background-color: grey;
transform: scale(1);
}
:checked + label img {
transform: scale(0.9);
/* box-shadow: 0 0 5px #333; */
z-index: -1;
}
/*

View File

@@ -0,0 +1,33 @@
/* ==========================================
SHOW UPLOADED IMAGE
* ========================================== */
function readURL(input) {
  if (input.files && input.files[0]) {
    var reader = new FileReader();
    reader.onload = function (e) {
      // show the chosen file as a preview in #imageResult (if present)
      $('#imageResult').attr('src', e.target.result);
    };
    reader.readAsDataURL(input.files[0]);
  }
}

$(function () {
  $('#upload').on('change', function () {
    readURL(this);
  });
});

/* ==========================================
SHOW UPLOADED IMAGE NAME
* ========================================== */
var input = document.getElementById('upload');
var infoArea = document.getElementById('upload-label');

input.addEventListener('change', showFileName);

function showFileName(event) {
  var input = event.target;
  var fileName = input.files[0].name;
  infoArea.textContent = 'File name: ' + fileName;
}

View File

@@ -0,0 +1,73 @@
import time

import cv2
import face_recognition

cap = cv2.VideoCapture('./data/source-1.mp4')

fps = 20  # output frame rate; the code assumes the source is roughly 20 fps
detected_face = []   # encodings of every distinct face seen so far
save_image_count = 0
timestr = time.strftime("%Y%m%d-%H%M%S")

# NOTE: the 'x264' FourCC needs an OpenCV/FFmpeg build with H.264 support,
# and the writer is opened for 1280x720 frames (the code assumes this input size)
fourcc = cv2.VideoWriter_fourcc(*'x264')
output_movie = cv2.VideoWriter(timestr + '.mp4', fourcc, fps, (1280, 720))

while True:
    # Grab a single frame of video
    ret, frame = cap.read()
    appendData = False
    if not ret:
        break

    # Convert the image from BGR color (which OpenCV uses) to RGB
    # color (which face_recognition uses)
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Find all the faces in the current frame of video
    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

    if len(detected_face) == 0:
        # first frame with faces: treat every face as new
        for face_encoding in face_encodings:
            detected_face.append(face_encoding)
            appendData = True
    else:
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(detected_face, face_encoding, tolerance=0.5)
            if not any(matches):
                # only keep faces that are clearly different from the known ones
                face_distances = face_recognition.face_distance(detected_face, face_encoding)
                for face_distance in face_distances:
                    print(face_distance)
                    if face_distance > 0.65:
                        appendData = True
                        detected_face.append(face_encoding)
                        print("append")

    for top, right, bottom, left in face_locations:
        if appendData:
            # save a thumbnail of the newly detected face
            crop_img = frame[top:bottom, left:right]
            save_image_count = save_image_count + 1
            cv2.imwrite("./save-data/" + timestr + '-' + str(save_image_count) + ".png", crop_img)
            print('Save Image')
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

    output_movie.write(frame)

print(len(detected_face))
cap.release()
output_movie.release()
cv2.destroyAllWindows()

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,113 @@
/*
*
* ==========================================
* CUSTOM UTIL CLASSES
* ==========================================
*
*/
#upload {
opacity: 0;
}
#upload-label {
position: absolute;
top: 35%;
left: 1rem;
transform: translateY(-50%);
}
.image-area {
border: 2px dashed rgba(255, 255, 255, 0.7);
padding: 1rem;
position: relative;
}
.image-area::before {
content: 'Uploaded image result';
color: #fff;
font-weight: bold;
text-transform: uppercase;
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
font-size: 0.8rem;
z-index: 1;
}
.image-area img {
z-index: 2;
position: relative;
}
.btn-render{
margin-top: 80px;
}
/*
*
* ==========================================
* FOR DEMO PURPOSES
* ==========================================
*
*/
body {
min-height: 100vh;
background-color: #757f9a;
background-image: linear-gradient(147deg, #757f9a 0%, #d7dde8 100%);
}
input[type="checkbox"] {
display: none;
}
label {
display: block;
position: relative;
margin: 10px;
cursor: pointer;
}
label:before {
background-color: white;
color: white;
content: " ";
display: block;
border-radius: 50%;
border: 1px solid grey;
position: absolute;
top: -5px;
left: -5px;
width: 25px;
height: 25px;
text-align: center;
line-height: 28px;
transition-duration: 0.4s;
transform: scale(0);
z-index: 999;
}
label img {
height: 80px;
width: 80px;
transition-duration: 0.2s;
transform-origin: 50% 50%;
}
:checked + label {
border-color: #ddd;
}
:checked + label:before {
content: "✓";
background-color: grey;
transform: scale(1);
}
:checked + label img {
transform: scale(0.9);
/* box-shadow: 0 0 5px #333; */
z-index: -1;
}
/*

View File

@@ -0,0 +1,33 @@
/* ==========================================
SHOW UPLOADED IMAGE
* ========================================== */
function readURL(input) {
  if (input.files && input.files[0]) {
    var reader = new FileReader();
    reader.onload = function (e) {
      // show the chosen file as a preview in #imageResult (if present)
      $('#imageResult').attr('src', e.target.result);
    };
    reader.readAsDataURL(input.files[0]);
  }
}

$(function () {
  $('#upload').on('change', function () {
    readURL(this);
  });
});

/* ==========================================
SHOW UPLOADED IMAGE NAME
* ========================================== */
var input = document.getElementById('upload');
var infoArea = document.getElementById('upload-label');

input.addEventListener('change', showFileName);

function showFileName(event) {
  var input = event.target;
  var fileName = input.files[0].name;
  infoArea.textContent = 'File name: ' + fileName;
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,57 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>AI Face Blur in Video</title>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="{{ url_for('static', filename='css/style.css') }}">
<link rel="stylesheet" type="text/css" href="https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css">
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.5.1/jquery.slim.min.js"></script>
</head>
<body>
<div class="container py-5" style="margin-top: 250px;">
<!-- For demo purpose -->
<header class="text-white text-center">
<h1 class="display-4">AI Blur Faces in Video </h1>
<p class="lead mb-0">AI will detect the video faces, you can select you want to blur of face </p>
<p> </p>
</header>
<br>
<div class="row py-4">
<div class="col-lg-6 mx-auto">
<form action="/upload" method="POST" enctype="multipart/form-data">
<!-- Upload image input-->
<div class="input-group mb-3 px-2 py-2 rounded-pill bg-white shadow-sm">
<input id="upload" type="file" name="video" id="video" onchange="readURL(this);" class="form-control border-0">
<label id="upload-label" for="upload" class="font-weight-light text-muted">Choose file</label>
<div class="input-group-append">
<label for="upload" class="btn btn-light m-0 rounded-pill px-4"> <i class="fa fa-cloud-upload mr-2 text-muted"></i><small class="text-uppercase font-weight-bold text-muted">Choose file</small></label>
</div>
</div>
<div class="mb-3 text-center" style="margin-top: 80px;">
<button type="submit" id="btnProcess" class="btn btn-outline-light">Process</button>
</div>
</form>
</div>
</div>
</div>
<footer>
</footer>
<script src="{{ url_for('static', filename='js/main.js') }}"></script>
<script>
$(document).ready(function(){
$('#btnProcess').click(function(){
$(this).html('<div class="spinner-border" role="status"><span class="sr-only">Loading...</span></div>');
})
});
</script>
</body>
</html>

View File

@@ -0,0 +1,63 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>AI Face Blur in Video</title>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="{{ url_for('static', filename='css/style.css') }}">
<link rel="stylesheet" type="text/css" href="https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css">
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.5.1/jquery.slim.min.js"></script>
</head>
<style>
body{
overflow: hidden;
}
</style>
<body>
<div class="py-5" style="height: 900px;background-color: white;margin-top: 85px; padding: 50px; ">
<form action="ml/render" method="POST">
<div class="row">
<div class="col-md-4">
<div style="overflow-y: scroll;">
<h1>{{fileName}}</h1>
<p>Select the faces you want to blur:</p>
<div class="d-flex flex-row bd-highlight mb-4">
{% for people in detected_face_img %}
<div class="p-2 bd-highlight">
<input type="checkbox" name="face" id="{{people.id}}" value="{{people.id}}" />
<label for="{{people.id}}"><img src="{{people.url}}" /></label>
</div>
{% endfor %}
</div>
</div>
</div>
<div class="col-md-8">
<div class="embed-responsive embed-responsive-16by9">
<video class="video-fluid z-depth-1" autoplay loop controls muted>
<source src="/static/save-data/{{fileName}}.mp4" type="video/mp4" />
</video>
</div>
</div>
</div>
<div class="btn-render">
<button type="submit" id="btnRender" class="btn btn-dark float-right" style="margin-top: 10px;">Render</button>
</div>
<input type="hidden" id="file" name="file" value="{{fileName}}">
</form>
</div>
<footer>
</footer>
<script src="{{ url_for('static', filename='js/main.js') }}"></script>
</body>
<script>
$(document).ready(function(){
$('#btnRender').click(function(){
$(this).html('<div class="spinner-grow text-light" role="status"><span class="sr-only">Loading...</span></div>');
});
});
</script>
</html>

View File

@@ -0,0 +1,57 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>AI Face Blur in Video</title>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="{{ url_for('static', filename='css/style.css') }}">
<link rel="stylesheet" type="text/css" href="https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css">
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.5.1/jquery.slim.min.js"></script>
</head>
<style>
body{
overflow-y: scroll;
}
</style>
<body>
<div class="py-5" style="height: 100%;background-color: white;margin-top: 85px; margin-bottom: 85px; padding: 50px; ">
<div class="mx-auto" style="width: 60%;">
<div class="embed-responsive embed-responsive-16by9">
<video class="video-fluid z-depth-1" autoplay loop controls muted>
<source src="/static/save-data/{{fileName}}-finish.mp4" type="video/mp4" />
</video>
</div>
<br>
<a href="/static/save-data/{{fileName}}-finish.mp4" download><button class="btn btn-info"><i class="fa fa-download"></i> Download</button></a>
</div>
<div style="margin-top: 50px;">
<div class="text-center">
<h4>The faces you selected:</h4>
<div class="row">
<div class="col-md-12">
<div class="overflow-auto">
<div class="d-flex flex-row bd-highlight mb-10 justify-content-center">
{% for people in detected_face_img %}
<div class="p-2 bd-highlight">
<input type="checkbox" name="person" value="{{people.id}}" />
<label for="{{people.id}}"><img src="{{people}}" /></label>
</div>
{% endfor %}
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<footer>
</footer>
<script src="{{ url_for('static', filename='js/main.js') }}"></script>
</body>
</html>