~hamner/HomeButler

b478d5cf5bdaff2cb4cd65351b2f9fc5875ced73 — David Hamner 2 years ago 5bdd9ea master
Clean up
11 files changed, 0 insertions(+), 709 deletions(-)

D background_scripts/copy_print.py
D background_scripts/office_door_motion.py
D background_scripts/vosk_dump.py
D security sensors/sensors.py
D security sensors/settings.txt
D security sensors/setup.txt
D security sensors/startup.sh
D security sensors/templates/fail.html
D security sensors/templates/form.html
D security sensors/templates/index.html
D security sensors/templates/pass.html
D background_scripts/copy_print.py => background_scripts/copy_print.py +0 -73
@@ -1,73 +0,0 @@
#!/usr/bin/python3
#home-butler
#Copyright (C) 2022  David Hamner

#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.

#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#GNU General Public License for more details.

#You should have received a copy of the GNU General Public License
#along with this program.  If not, see <http://www.gnu.org/licenses/>.
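
#Mirror OctoPrint jobs: when the master printer starts a print, download the
#gcode from the master, upload it to the slave, and start the same job there.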
import os
import time
import json
import subprocess
from butler_common import *
#print("cancel print")
current_voice_cmd = ""
#cmd = "curl -k4 --request POST -H '" + OCTOPRINT_API_KEY + "' -H 'Content-Type: application/json' --data '{\"command\":\"cancel\"}' http://10.250.10.207/api/job"
cmd = "curl -k4 --request GET -H '" + OCTOPRINT_API_KEY + "' -H 'Content-Type: application/json' http://10.250.10.207/api/job"
result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
data = json.loads(result.stdout.decode())
print(data['job']['file']['name'])
print(data['state'])
#cmd = "espeak 'canceling print disabled'"
#os.system(cmd)

master = "http://10.250.10.207"
slave = "http://10.250.10.15"

last_state = ""

while True:
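    #Poll the master's job state every 10 seconds; an Operational -> Printing
    #transition means a new job just started.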
    
    cmd = "curl -k4 --request GET -H '" + OCTOPRINT_API_KEY + "' -H 'Content-Type: application/json' http://10.250.10.207/api/job"
    result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
    try:
        data = json.loads(result.stdout.decode())
    except Exception:
        print("Octoprint likely not running")
        time.sleep(10)
        continue
    new_state = data['state']
    if last_state == "Operational" and new_state == "Printing":
        file_name = data['job']['file']['name']
        
        #Download from master
        gcode_url = f"{master}/downloads/files/local/{file_name}"
        cmd = "curl -k4 --request GET -H '" + OCTOPRINT_API_KEY + f"' -H 'Content-Type: application/json' {gcode_url}"
        result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
        gcode = result.stdout.decode()
        with open(f"/tmp/{file_name}", "w+") as fh:
            fh.write(gcode)
        
        #upload to slave
        cmd = f'curl -k -H "{OCTOPRINT_API_KEY}" -F "select=false" -F "print=false" -F "file=@/tmp/{file_name}" "{slave}/api/files/local"'
        print(cmd)
        result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
        print(result.stdout.decode())
        
        #Trigger the mirrored print on the slave
        cmd = "curl -k4 --request POST -H '" + OCTOPRINT_API_KEY + "' -H 'Content-Type: application/json' --data '{\"command\":\"select\",\"print\":true}' " + slave + "/api/files/local/" + file_name
        os.system(cmd)
        
        print (f"Mirror job started {file_name}")
    else:
        print(f"status: {new_state}")
    last_state = new_state
    time.sleep(10)

D background_scripts/office_door_motion.py => background_scripts/office_door_motion.py +0 -71
@@ -1,71 +0,0 @@
#!/usr/bin/python3


#GPL3 based on: https://github.com/jb-0001/object-motion-detection
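#Watch the office-door RTSP feed with frame differencing; when a burst of
#motion ends, use its duration and last x position to decide enter vs. exit
#and notify the butler server over HTTP.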

import os
import cv2



video = cv2.VideoCapture("rtsp://10.250.10.15:8554/unicast")

"""
while(True):
    ret, frame = video.read()
    cv2.imshow('VIDEO', frame)
    cv2.waitKey(1)
"""


ret, frame1 = video.read()
ret, frame2 = video.read()

lastPose = -1
num_move_frames = 0
while video.isOpened():
    difference = cv2.absdiff(frame1, frame2)
    grayscale = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(grayscale, (5, 5), 0)
    _, threshold = cv2.threshold(blur, 35, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(threshold, None, iterations=2)
    contours, _ = cv2.findContours(
        dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    for contour in contours:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 950:
            continue
        cv2.drawContours(frame1, contours, -1, (0, 255, 0), 1)
        cv2.rectangle(frame1, (x, y), (x+w, y+h), (120, 0, 150), 2)
        lastPose = x
    if contours != []:
        num_move_frames = num_move_frames + 1
        #print(f"Updating lastPose: {lastPose}")
    if contours == []:
        if lastPose != -1:
            print(f"End of movement: {lastPose}, amount: {num_move_frames}")
            if num_move_frames < 35:
                print(f"Motion too fast: {num_move_frames}")
            elif num_move_frames > 90:
                print(f"Motion too slow: {num_move_frames}")
            elif lastPose < 200:
                print("Welcome to the office")
                os.system("curl 10.250.10.133:5000/person_entered_office")
            else:
                print("Bye for now")
                os.system("curl 10.250.10.133:5000/person_exited_office")
            lastPose = -1
            num_move_frames = 0
    #cv2.imshow("feed", frame1)
    cv2.waitKey(1)
    frame1 = frame2
    ret, frame2 = video.read()

cv2.destroyAllWindows()
video.release()

D background_scripts/vosk_dump.py => background_scripts/vosk_dump.py +0 -133
@@ -1,133 +0,0 @@
#!/usr/bin/env python3
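#Stream microphone audio through a local Vosk model and forward each newly
#recognized word to the home-butler server as an incremental voice command.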

import argparse
import os
import queue
import sounddevice as sd
import vosk
import sys
import urllib.parse

script_path = os.path.realpath(os.path.abspath(__file__))
script_dir = os.path.dirname(script_path)
print(script_dir)
os.chdir(script_dir)

q = queue.Queue()

def int_or_str(text):
    """Helper function for argument parsing."""
    try:
        return int(text)
    except ValueError:
        return text

def callback(indata, frames, time, status):
    """This is called (from a separate thread) for each audio block."""
    #if status:
        #print(status, file=sys.stderr)
    q.put(bytes(indata))

parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
    '-l', '--list-devices', action='store_true',
    help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
    print(sd.query_devices())
    parser.exit(0)
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
    parents=[parser])
parser.add_argument(
    '-f', '--filename', type=str, metavar='FILENAME',
    help='audio file to store recording to')
parser.add_argument(
    '-m', '--model', type=str, metavar='MODEL_PATH',
    help='Path to the model')
parser.add_argument(
    '-d', '--device', type=int_or_str,
    help='input device (numeric ID or substring)')
parser.add_argument(
    '-r', '--samplerate', type=int, help='sampling rate')
args = parser.parse_args(remaining)

try:
    if args.model is None:
        args.model = "model"
    if not os.path.exists(args.model):
        print ("Please download a model for your language from https://alphacephei.com/vosk/models")
        print ("and unpack as 'model' in the current folder.")
        parser.exit(0)
    if args.samplerate is None:
        device_info = sd.query_devices(args.device, 'input')
        # soundfile expects an int, sounddevice provides a float:
        args.samplerate = int(device_info['default_samplerate'])

    model = vosk.Model(args.model)

    if args.filename:
        dump_fn = open(args.filename, "wb")
    else:
        dump_fn = None

    with sd.RawInputStream(samplerate=args.samplerate, blocksize = 8000, device=args.device, dtype='int16',
                            channels=1, callback=callback):
            print('#' * 80)
            print('Press Ctrl+C to stop the recording')
            print('#' * 80)

            #rec = vosk.KaldiRecognizer(model, )
            rec = vosk.KaldiRecognizer(model, args.samplerate)
            processed_words = ""
            while True:
                data = q.get()
                ok = rec.AcceptWaveform(data)
                if ok:
                    
                    cmd = rec.Result().split('"')[-2]
                    if cmd.startswith("the"):
                        cmd = cmd[3:].strip()
                    
                    processed_words = ""
                    #print(f"\nCMD: {cmd}\n")
                    #url_cmd = urllib.parse.quote(cmd)
                    cmd = f"curl 'http://10.250.10.133:5000/voice_cmd_reset'"
                    #print(cmd)
                    os.system(cmd)
                    #print(rec.Result())
                else:
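                    #Partial result: compare against the words already sent and
                    #forward only the newly recognized words to the server.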
                    new_word = ""
                    part = rec.PartialResult().split('"')[-2]
                    if part.startswith("the"):
                        part = part[3:].strip()
                    if processed_words in part:
                        if processed_words != "":
                            new_word = part.split(processed_words)[-1]
                        else:
                            new_word = part
                    elif len(processed_words.split(" ")) > 1:
                        num_words_to_keep = len(part.split(" ")) - len(processed_words.split(" "))
                        if num_words_to_keep <= 1:
                            new_word = " ".join(part.split(" ")[:num_words_to_keep*-1])
                    else:
                        new_word = part       
                    if new_word != "":
                        print(f"\rNew: {new_word}")
                        new_word = new_word.strip()
                        processed_words = part
                        url_cmd = urllib.parse.quote(new_word)
                        cmd = f"curl 'http://10.250.10.133:5000/voice_cmd_update/{url_cmd}'"
                        #print(cmd)
                        os.system(cmd)

                    #print(f"Part: {part}",end='\r')
                #if dump_fn is not None:
                    #dump_fn.write(data)

except KeyboardInterrupt:
    print('\nDone')
    parser.exit(0)
except Exception as e:
    parser.exit(type(e).__name__ + ': ' + str(e))

D security sensors/sensors.py => security sensors/sensors.py +0 -328
@@ -1,328 +0,0 @@
#!/usr/bin/python3
#home-butler
#GNU LGPL v3
#Copyright (C) 2022 David Hamner
#hamner@librem.one

#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 3 of the License, or (at your option) any later version.

#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#Lesser General Public License for more details.

#You should have received a copy of the GNU Lesser General Public License
#along with this program; if not, write to the Free Software Foundation,
#Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
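
#Security-camera sensors: per-camera motion detection, event recording to
#disk, detectron2 object labeling, and MJPEG streaming via flask_opencv_streamer.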

import cv2
import numpy as np
from datetime import datetime
import time
from threading import Thread
import subprocess
import os
import json
from signal import signal, SIGINT
from flask_opencv_streamer.streamer import Streamer
import torch
import detectron2
TORCH_VERSION = ".".join(torch.__version__.split(".")[:2])
CUDA_VERSION = torch.__version__.split("+")[-1]
print("torch: ", TORCH_VERSION, "; cuda: ", CUDA_VERSION)
print("detectron2:", detectron2.__version__)
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
cfg = get_cfg()
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)


port = 2468
require_login = False


#Thank you to: https://github.com/abhishek305/Motion-capture-using-opencv-contours/blob/master/Contours%20Opencv.py
#Some of this was based on ^^^
script_path = os.path.realpath(os.path.abspath(__file__))
script_dir = os.path.dirname(script_path)
event_base = f"{script_dir}/events/"
if not os.path.isdir(event_base):
    os.mkdir(event_base)

#Main settings
border = 70
num_frames_needed_to_trigger = 3
num_frames_needed_to_end_trigger = 10
min_move_area = 600
trigger_move_area = 12000
too_many_motion_points = 400

max_back_log = 3
eyes_busy = False


#cam_devices= {"Potato": "rtsp://10.250.10.225:554/Streaming/Channes/ID/?transportmode=unicast"}
#cap=cv2.VideoCapture("rtsp://10.250.10.225:554/Streaming/Channes/ID/?transportmode=unicast")

exit = False


def load_settings():
    cam_devices = {}
    settings_file_path = f"{script_dir}/settings.txt"
    with open(settings_file_path) as settings_file:
        for config_line in settings_file.readlines():
            if config_line.startswith("#") or config_line.strip() == "":
                continue
            raw_data = config_line.strip().split("~")
            name = raw_data[0]
            device = raw_data[1]
            cam_devices[name] = device
    return(cam_devices)
cam_devices = load_settings()
print(cam_devices)


def what_is_this(file_name):
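    #Run the detectron2 predictor on a saved frame, return the detected class
    #names, and rename the image file to include them.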
    global eyes_busy
    global predictor
    eyes_busy = True
    found_objs = []
    
    im = cv2.imread(file_name)

    with torch.no_grad():
        outputs = predictor(im)
    instances = outputs["instances"]
    detected_class_indexes = instances.pred_classes
    prediction_boxes = instances.pred_boxes
    metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
    class_catalog = metadata.thing_classes

    #Thank you to: https://stackoverflow.com/a/68471952
    for idx, coordinates in enumerate(prediction_boxes):
        class_index = detected_class_indexes[idx]
        class_name = class_catalog[class_index]
        found_objs.append(class_name)
        print(class_name, coordinates)

    #Clean up ram
    #del(predictor)
    del(outputs)
    del(instances)
    del(detected_class_indexes)
    del(prediction_boxes)
    del(metadata)
    del(class_catalog)
    
    eyes_busy = False
    
    #cmd = f"lumi predict --checkpoint=fast".split(" ")
    #cmd.append(file_name)  
    #OUT = subprocess.Popen(cmd, stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
    #stdout,stderr = OUT.communicate()
    #stdout = stdout.decode().strip()
    #if "\n" in stdout:
    #    raw_text = stdout.split('\n')[-1]
    #    result = json.loads(raw_text)
    #    eyes_busy = False
    #    for obj in result['objects']:
    #        name = obj['label']
    #        if name not in found_objs:
    #            found_objs.append(name)
    if found_objs != []:
        new_name = "_".join(found_objs)
        old_name = file_name.split('/')[-1]
        full_new_name = os.path.dirname(file_name)
        full_new_name = f"{full_new_name}/{new_name}_{old_name}"
        os.rename(file_name, full_new_name)
        print(found_objs)
    return(found_objs)
    #print(stderr)

def main_cam_loop(cam_name, dev_name):
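    #Per-camera loop: frame-difference motion detection, event recording
    #(cropped stills + mp4), object labeling in a background thread, and
    #MJPEG streaming on its own port.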
    global exit
    global port
    
    streamer = Streamer(port, require_login)
    print(f"Sarting {cam_name}, Dev: {dev_name}, on port: {port}")
    port = port + 1
    
    cap = cv2.VideoCapture(dev_name)
    if cap.isOpened():
        ret, frame = cap.read()
    else:
        ret = False
    if not ret:
        print(f"Error with {dev_name} {cam_name}")
        return
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    ret,frame1 = cap.read()
    ret,frame2 = cap.read()

    back_log = []
    move_frames = 0
    event_path = ""
    sleepy_frames = 0 # used to keep track of number of non moving frames
    video_to_save = ""
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    start_time = time.time()
    frame_count = 0
    fps = 1


    while not exit:
        ret,frame = cap.read()
        #frame = cv2.fastNlMeansDenoisingColored(frame,None,10,10,7,21)
        time_now = time.time()
        time_taken = time_now - start_time
        start_time = time_now
        fps = 1/time_taken
        print(f"{cam_name} FPS: {fps}")
        d = cv2.absdiff(frame1, frame2)
        try:
            grey = cv2.cvtColor(d, cv2.COLOR_BGR2GRAY)
        except Exception:
            print("Error pulling frame from...")
            continue
        blur = cv2.GaussianBlur(grey, (5, 5), 0)
        ret, th = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
        dilated = cv2.dilate(th, np.ones((3, 3), np.uint8), iterations=3)
        #img,c,h=cv2.findContours(dilated,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        
        
        #Check if frame is moving
        if len(contours) > too_many_motion_points:
            count = len(contours)
            print(f"Warning: too many motion points ({count}), ignoring frame")
            contours = []
        found_moving = False
        for i in contours:
            if cv2.contourArea(i) < min_move_area:
                continue
            (x, y, w, h) = cv2.boundingRect(i)
            size = w * h
            
            if size > trigger_move_area:
                found_moving = True
                if move_frames > num_frames_needed_to_trigger:
                    ts = time.strftime('%l:%M%p_(+%S)_on_%b_%d_%Y')
                    local_event_base = f"{event_base}/{cam_name}"
                    if not os.path.isdir(local_event_base):
                        os.mkdir(local_event_base)
                    #ts = f"{ }_at_{ts}"
                    if event_path == "":
                        event_path = f"{local_event_base}/{ts}/"
                        print(f"New event: {ts}")
                        os.mkdir(event_path)
                        #video_to_save = cv2.VideoWriter(f'{script_dir}/{ts}/event.avi', -1, 20.0, (1920,1080))
                        height, width = frame1.shape[:2]
                        shape = (width, height)
                        video_to_save = cv2.VideoWriter(f'{local_event_base}/{ts}/event.mp4', fourcc, fps, shape)
                    cv2.rectangle(frame1, (x-border, y-border), (x + w+border, y + h+border), (0, 0, 255), 2)

                    #expand border around the moving item, clamped to the frame
                    x_start = max(x - border, 0)
                    x_end = min(x + w + border, frame1.shape[1])
                    y_start = max(y - border, 0)
                    y_end = min(y + h + border, frame1.shape[0])

                    
                    #crop = frame1[50:100, 1620:1920]
                    #print("Shape of the cut", crop.shape)
                    
                    #If eye thread is not busy, check what is moving
                    #TODO If busy for too long do it anyway. 
                    if not eyes_busy:
                        crop = frame[y_start:y_end, x_start:x_end]
                        file_name = f"{event_path}{move_frames}.jpg"
                        cv2.imwrite(file_name, crop)
                        #cv2.imshow("cropped", crop)
                        back_log.append(Thread(target=what_is_this, args=[file_name]))
                        back_log[-1].daemon = True
                        back_log[-1].start()
                    else:
                        print("Warning, skipping process")
                #Movement but not a trigger yet
                else:
                    cv2.rectangle(frame1, (x-border, y-border), (x + w+border, y + h+border), (255, 0, 0), 2)

        #Save frame to video
        print(video_to_save)
        if video_to_save != "":
            video_to_save.write(frame1)
        
        streamer.update_frame(frame1)
        if not streamer.is_streaming:
            streamer.start_streaming()
        
        if found_moving:
            move_frames = move_frames + 1
        else:
            #End of event
            if sleepy_frames > num_frames_needed_to_end_trigger:
                move_frames = 0
                event_path = ""
                sleepy_frames = 0
                if video_to_save != "":
                    video_to_save.release()
                    video_to_save = ""
            else:
                sleepy_frames = sleepy_frames + 1
        
        cv2.drawContours(frame1,contours,-1,(0,255,255),2)
        #cv2.imshow("inter",frame1)
        
        if cv2.waitKey(40) == 27:
            exit = True
        frame1 = frame2
        ret,frame2= cap.read()
    cv2.destroyAllWindows()
    #VideoFileOutput.release()
    cap.release()

cam_threads = []
for cap_dev in cam_devices:
    print(f"Starting {cap_dev} {cam_devices[cap_dev]}")
    cam_threads.append(Thread(target=main_cam_loop, args=[cap_dev, cam_devices[cap_dev]]))
    cam_threads[-1].daemon = True
    cam_threads[-1].start()
    time.sleep(3)

def on_exit(signal_received, frame):
    global exit
    exit = True
    # Handle any cleanup here
    print('SIGINT or CTRL-C detected. Exiting gracefully')

signal(SIGINT, on_exit)
while not exit:
    time.sleep(1)

D security sensors/settings.txt => security sensors/settings.txt +0 -5
@@ -1,5 +0,0 @@
#Name~Device
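#One camera per line: display name, '~', then an OpenCV capture source
#(an RTSP URL or a numeric device index). Blank lines and '#' comments are ignored.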
Door~rtsp://10.250.10.225:554/Streaming/Channes/ID/?transportmode=unicast
#Room~0
#Window~4
#Shed~2

D security sensors/setup.txt => security sensors/setup.txt +0 -22
@@ -1,22 +0,0 @@
https://github.com/facebookresearch/detectron2/blob/main/INSTALL.md
sudo pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
sudo python3 -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
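
#Optional sanity check (a minimal sketch; assumes the install above succeeded):
#print the path of the model config that sensors.py loads.
python3 -c "from detectron2 import model_zoo; print(model_zoo.get_config_file('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))"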

#sudo pip3 install cython; pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'

#Old
Install pip2
#CPU
sudo pip2 install "tensorflow<2.0"
sudo pip2 install luminoth

#GPU
sudo pip2 install "tensorflow-gpu<2.0"
sudo pip2 install luminoth[tf-gpu]

lumi checkpoint refresh
lumi checkpoint list
lumi checkpoint download accurate
lumi checkpoint download fast

lumi predict --checkpoint=fast <Img>

D security sensors/startup.sh => security sensors/startup.sh +0 -5
@@ -1,5 +0,0 @@
#!/bin/bash
#Limit memory use
cd "$(dirname "$0")"
export LRU_CACHE_CAPACITY=1
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 python3 ./sensors.py

D security sensors/templates/fail.html => security sensors/templates/fail.html +0 -15
@@ -1,15 +0,0 @@
<!doctype html>
<html>

<body>
    <div id="reason" style="display: none;">
        {{reason}}
    </div>

    <script>
        alert(document.getElementById("reason").innerText);
        location.href = "../change%20password";
    </script>
</body>

</html>
\ No newline at end of file

D security sensors/templates/form.html => security sensors/templates/form.html +0 -33
@@ -1,33 +0,0 @@
<!doctype html>
<html>

<body>
    <center>
        <form action="../change%20password%20result" method="POST">
            <table>
                <tr>
                    <th>Username:</th>
                    <th><input type="text" name="username" /></th>
                </tr>
                <tr>
                    <th>Old Password:</th>
                    <th><input type="password" name="old_pw" /></th>
                </tr>
                <tr>
                    <th>New Password:</th>
                    <th><input type="password" name="pw" /></th>
                </tr>
                <tr>
                    <th>(Confirm)</th>
                    <th><input type="password" name="pw_conf" /></th>
                </tr>
                <tr>
                    <th><input type="submit" value="Change Password" /></th>
                    <th></th>
                </tr>
            </table>
        </form>
    </center>
</body>

</html>
\ No newline at end of file

D security sensors/templates/index.html => security sensors/templates/index.html +0 -9
@@ -1,9 +0,0 @@
<html>
  <head>
    <title>Video Streaming Demonstration</title>
  </head>
  <body>
    <h1>Video Streaming Demonstration</h1>
    <img src="{{ url_for('video_feed') }}">
  </body>
</html>

D security sensors/templates/pass.html => security sensors/templates/pass.html +0 -15
@@ -1,15 +0,0 @@
<!doctype html>
<html>

<body>
    <script>
        alert("Sucessfully changed password!");
        location.href = "..";
    </script>

    <p>
        {{reason}}
    </p>
</body>

</html>
\ No newline at end of file