~hamner/HomeButler

d81be9c67df601a19091734c215a221718f64617 — David Hamner 1 year, 3 months ago 97ab9f8
Add init security sensors and switch to LGPL
M README.md => README.md +13 -10
@@ 3,24 3,27 @@
 * Start home_butler.py
 * Run vosk_dump.py to pipe words to home_butler.py

 # GPL3 baby! #
 # LGPL3 baby! #

# Testing #

 * PYTHONPATH=$PYTHONPATH:/path/to/install ./scriptname.py

home-butler
Copyright (C) 2022  David Hamner
GNU LGPL v3
Copyright (C) 2022 David Hamner
hamner@librem.one

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
You should have received a copy of the GNU Lesser General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.

M butler_common.py => butler_common.py +12 -9
@@ 1,19 1,22 @@
#!/usr/bin/python3
#home-butler
#Copyright (C) 2022  David Hamner
#GNU LGPL v3
#Copyright (C) 2022 David Hamner
#hamner@librem.one

#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 3 of the License, or (at your option) any later version.

#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#GNU General Public License for more details.
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#Lesser General Public License for more details.

#You should have received a copy of the GNU General Public License
#along with this program.  If not, see <http://www.gnu.org/licenses/>.
#You should have received a copy of the GNU Lesser General Public License
#along with this program; if not, write to the Free Software Foundation,
#Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
import os
import glob
import subprocess

M home_butler.py => home_butler.py +62 -11
@@ 1,20 1,26 @@
#!/usr/bin/python3
#home-butler
#Copyright (C) 2022  David Hamner
#GNU LGPL v3
#Copyright (C) 2022 David Hamner
#hamner@librem.one

#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 3 of the License, or (at your option) any later version.

#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#GNU General Public License for more details.
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#Lesser General Public License for more details.

#You should have received a copy of the GNU General Public License
#along with this program.  If not, see <http://www.gnu.org/licenses/>.
from flask import Flask
#You should have received a copy of the GNU Lesser General Public License
#along with this program; if not, write to the Free Software Foundation,
#Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.


#sudo pip3 install flask_opencv_streamer
from flask import Flask, send_file
import os
import threading
import subprocess


@@ 135,12 141,13 @@ def css():

def menu_html():
    return """

    
        <div class="menu">
            <a href="/GUI/welcome">Logout</a>
            <a href="/GUI/api_cmd">Static</a>
            <a href="/GUI/voice_cmd">Voice</a>
            <a href="/GUI/ears">Ears</a>
            <a href="/GUI/sensors">Sensors</a>
        </div>"""
    
@app.route("/voice_cmd_update/<voice_cmd>")


@@ 158,6 165,50 @@ def process_voice_cmd_part(voice_cmd):
    return(voice_cmd)
    

@app.route("/sensors")
def display_sensors():
    # Simple page that embeds the MJPEG stream produced by the separate
    # security-sensor process ("security sensors/sensors.py", which serves
    # via flask_opencv_streamer on port 2468).
    # NOTE(review): the stream host/port are hard-coded here — consider
    # reading them from configuration instead.
    html = """
    <html>
        <head>
            <title>Video Streaming Demonstration</title>
        </head>
        <body>
            <h1>Video Streaming Demonstration</h1>
            <img src="http://10.250.10.133:2468/video_feed">
        </body>
    </html>
    """
    return html





@app.route('/video_feed')
def video_feed():
    """Stream the current frame as a multipart (MJPEG) HTTP response."""
    # Bug fix: this route referenced Response, but the module-level import
    # only brings in Flask and send_file, so hitting the route raised a
    # NameError. Import Response locally so the route works without touching
    # the file-wide import block.
    from flask import Response
    return Response(gen(), mimetype='multipart/x-mixed-replace; boundary=frame')


def gen():
    """Video streaming generator function.

    Yields multipart MJPEG chunks encoded from the module-level global
    ``img`` (set by the capture code elsewhere in this file).
    """
    global img
    import time  # local import: 'time' is not in this module's top-level imports
    while True:
        try:
            # Encode in memory instead of round-tripping through 't.jpg' on
            # disk: avoids per-frame disk I/O and a write/read race when two
            # clients stream at the same time.
            ok, buf = cv2.imencode('.jpg', img)
            if not ok:
                continue
            yield(b'--frame\r\n'
                b'Content-Type: image/jpeg\r\n\r\n' + buf.tobytes() + b'\r\n')
        except NameError as e:
            # 'img' does not exist until the first frame has been captured.
            print(e)
            time.sleep(0.1)  # don't busy-spin while waiting for the first frame



"""
@app.route("/security sensors/<path>")
def DownloadLogFile (path = None):
    print(f"{script_dir}/security sensors/{path}")
    return send_file(f"{script_dir}/security sensors/{path}", as_attachment=True)
"""

@app.route("/voice_cmd_reset")
def reset_voice_cmd():
    global current_voice_cmd

A lgpl-3.0.txt => lgpl-3.0.txt +165 -0
@@ 0,0 1,165 @@
                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.


  This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

  0. Additional Definitions.

  As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

  "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

  An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

  A "Combined Work" is a work produced by combining or linking an
Application with the Library.  The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

  The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

  The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

  1. Exception to Section 3 of the GNU GPL.

  You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

  2. Conveying Modified Versions.

  If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

   a) under this License, provided that you make a good faith effort to
   ensure that, in the event an Application does not supply the
   function or data, the facility still operates, and performs
   whatever part of its purpose remains meaningful, or

   b) under the GNU GPL, with none of the additional permissions of
   this License applicable to that copy.

  3. Object Code Incorporating Material from Library Header Files.

  The object code form of an Application may incorporate material from
a header file that is part of the Library.  You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

   a) Give prominent notice with each copy of the object code that the
   Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the object code with a copy of the GNU GPL and this license
   document.

  4. Combined Works.

  You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

   a) Give prominent notice with each copy of the Combined Work that
   the Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the Combined Work with a copy of the GNU GPL and this license
   document.

   c) For a Combined Work that displays copyright notices during
   execution, include the copyright notice for the Library among
   these notices, as well as a reference directing the user to the
   copies of the GNU GPL and this license document.

   d) Do one of the following:

       0) Convey the Minimal Corresponding Source under the terms of this
       License, and the Corresponding Application Code in a form
       suitable for, and under terms that permit, the user to
       recombine or relink the Application with a modified version of
       the Linked Version to produce a modified Combined Work, in the
       manner specified by section 6 of the GNU GPL for conveying
       Corresponding Source.

       1) Use a suitable shared library mechanism for linking with the
       Library.  A suitable mechanism is one that (a) uses at run time
       a copy of the Library already present on the user's computer
       system, and (b) will operate properly with a modified version
       of the Library that is interface-compatible with the Linked
       Version.

   e) Provide Installation Information, but only if you would otherwise
   be required to provide such information under section 6 of the
   GNU GPL, and only to the extent that such information is
   necessary to install and execute a modified version of the
   Combined Work produced by recombining or relinking the
   Application with a modified version of the Linked Version. (If
   you use option 4d0, the Installation Information must accompany
   the Minimal Corresponding Source and Corresponding Application
   Code. If you use option 4d1, you must provide the Installation
   Information in the manner specified by section 6 of the GNU GPL
   for conveying Corresponding Source.)

  5. Combined Libraries.

  You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

   a) Accompany the combined library with a copy of the same work based
   on the Library, uncombined with any other library facilities,
   conveyed under the terms of this License.

   b) Give prominent notice with the combined library that part of it
   is a work based on the Library, and explaining where to find the
   accompanying uncombined form of the same work.

  6. Revised Versions of the GNU Lesser General Public License.

  The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

  Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

  If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

A py_wand/code.py => py_wand/code.py +64 -0
@@ 0,0 1,64 @@
import board
from adafruit_ble import BLERadio
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.nordic import UARTService
import board
import busio
from microcontroller import Pin

import time
import board
import adafruit_mpu6050



from digitalio import DigitalInOut, Direction, Pull

# LED setup.
led = DigitalInOut(board.LED)
# For QT Py M0. QT Py M0 does not have a D13 LED, so you can connect an external LED instead.
# led = DigitalInOut(board.SCK)
led.direction = Direction.OUTPUT

# Push button on D5; with the internal pull-up, pressed reads False.
# For Gemma M0, Trinket M0, Metro M0 Express, ItsyBitsy M0 Express, Itsy M4 Express, QT Py M0
switch = DigitalInOut(board.D5)
# switch = DigitalInOut(board.D5)  # For Feather M0 Express, Feather M4 Express
# switch = DigitalInOut(board.D7)  # For Circuit Playground Express
switch.direction = Direction.INPUT
switch.pull = Pull.UP


# MPU-6050 accelerometer/gyro over I2C (see py_wand/notes.txt for wiring).
i2c = board.I2C()  # uses board.SCL and board.SDA
mpu = adafruit_mpu6050.MPU6050(i2c)

# BLE UART service: a connected central receives the motion readings.
ble = BLERadio()
uart = UARTService()
advertisement = ProvideServicesAdvertisement(uart)

# Main loop: advertise until a central connects, then while connected either
# stream accel/gyro readings (button held, active-low) or send a bare
# newline roughly every 10 idle iterations as a keep-alive.
while True:
    ble.start_advertising(advertisement)
    print("Waiting to connect")
    while not ble.connected:
        pass
    print("Connected")
    counter = 0
    while ble.connected:
        
        if switch.value:
            # Button released: LED off, count idle iterations.
            led.value = False
            counter = counter + 1
            if counter > 10:
                # NOTE(review): UARTService.write() on CircuitPython expects a
                # bytes-like object — confirm a str works here; otherwise use
                # "\n".encode().
                uart.write("\n")
                counter = 0
        else:
            # Button held: LED on, sample and transmit the IMU.
            led.value = True
            # Use the object to print the sensor readings
            print("Acceleration: X:%.2f, Y: %.2f, Z: %.2f m/s^2" % (mpu.acceleration))
            print("Gyro X:%.2f, Y: %.2f, Z: %.2f rad/s" % (mpu.gyro))
            print("Temperature: %.2f C" % mpu.temperature)
            data_str = f"{mpu.acceleration}:{mpu.gyro}\n"
            #print(data_str)
            uart.write(data_str)
            time.sleep(.2065)
        time.sleep(0.02)


A py_wand/notes.txt => py_wand/notes.txt +6 -0
@@ 0,0 1,6 @@
1 x Adafruit Feather nRF52840 Express[ID:4062] = $24.95
1 x MPU-6050
Install FW from: adafruit circuitpython
copy lib from https://github.com/adafruit/Adafruit_CircuitPython_Bundle/releases
host libs:
pip3 install --upgrade adafruit-blinka-bleio adafruit-circuitpython-ble

A security sensors/sensors.py => security sensors/sensors.py +328 -0
@@ 0,0 1,328 @@
#!/usr/bin/python3
#home-butler
#GNU LGPL v3
#Copyright (C) 2022 David Hamner
#hamner@librem.one

#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 3 of the License, or (at your option) any later version.

#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#Lesser General Public License for more details.

#You should have received a copy of the GNU Lesser General Public License
#along with this program; if not, write to the Free Software Foundation,
#Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.

import cv2
import numpy as np
from datetime import datetime
import time
from threading import Thread
import subprocess
import os
import json
from signal import signal, SIGINT
from flask_opencv_streamer.streamer import Streamer
import torch, detectron2
TORCH_VERSION = ".".join(torch.__version__.split(".")[:2])
CUDA_VERSION = torch.__version__.split("+")[-1]
print("torch: ", TORCH_VERSION, "; cuda: ", CUDA_VERSION)
print("detectron2:", detectron2.__version__)
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
cfg = get_cfg()
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)


# First streamer port; each camera thread claims the next consecutive port.
port = 2468
require_login = False


#Thank you to: https://github.com/abhishek305/Motion-capture-using-opencv-contours/blob/master/Contours%20Opencv.py
#Some of this was based on ^^^
# Resolve paths relative to this script and make sure events/ exists.
script_path = os.path.realpath(os.path.abspath(__file__))
script_dir = os.path.dirname(script_path)
event_base = f"{script_dir}/events/"
if not os.path.isdir(event_base):
    os.mkdir(event_base)

#Main settings
# NOTE(review): "boarder"/"triger" (border/trigger) are kept as-is because
# the names are referenced throughout this module.
# Pixel margin added around a moving region when drawing/cropping.
boarder = 70 
# Consecutive moving frames required to start recording an event.
num_frames_needed_to_trigger = 3
# Consecutive quiet frames required to close an event.
num_frames_needed_to_end_trigger = 10
# Contours smaller than this (area) are ignored as noise.
min_move_area = 600
# Bounding-box area (w*h) that counts as significant motion.
triger_move_area = 12000
# More contours than this is treated as global noise (e.g. lighting change).
too_many_motion_points =  400

max_back_log = 3
# Set while the detection thread is classifying a crop; new crops are skipped.
eyes_busy = False


#cam_devices= {"Potato": "rtsp://10.250.10.225:554/Streaming/Channes/ID/?transportmode=unicast"}
#cap=cv2.VideoCapture("rtsp://10.250.10.225:554/Streaming/Channes/ID/?transportmode=unicast")

# Module-wide shutdown flag (NOTE: shadows the 'exit' builtin).
exit = False

def load_settings(settings_file_path=None):
    """Read camera definitions from a settings file.

    Each non-comment, non-blank line has the form ``Name~Device`` (see
    ``settings.txt``), where Device is an OpenCV capture source such as a
    device index or an RTSP URL.

    Args:
        settings_file_path: Optional path to the settings file. Defaults to
            ``settings.txt`` next to this script (backward compatible with
            the original no-argument call).

    Returns:
        dict mapping camera name -> capture device string.
    """
    if settings_file_path is None:
        settings_file_path = f"{script_dir}/settings.txt"
    cam_devices = {}
    with open(settings_file_path) as settings_file:
        for config_line in settings_file:
            config_line = config_line.strip()
            if config_line.startswith("#") or config_line == "":
                continue
            # Split only on the first '~' so device strings may themselves
            # contain '~'; the original split() also raised IndexError on
            # malformed lines — skip those instead of crashing.
            name, sep, device = config_line.partition("~")
            if not sep:
                continue
            cam_devices[name] = device
    return cam_devices
# Load the camera map once at import time and echo it for debugging.
cam_devices = load_settings()
print(cam_devices)


def what_is_this(file_name):
    """Run object detection on an image and tag the file with what was found.

    Uses the module-level detectron2 ``predictor``. If any objects are
    detected, the image file is renamed so the detected class names are
    prefixed to its original name (e.g. ``person_3.jpg``). Sets the
    module-level ``eyes_busy`` flag while running so the capture loop can
    avoid piling up detection work.

    Args:
        file_name: Path to the image to classify.

    Returns:
        list of detected class names (possibly empty).
    """
    global eyes_busy
    global predictor
    eyes_busy = True
    found_objs = []
    try:
        im = cv2.imread(file_name)

        with torch.no_grad():
            outputs = predictor(im)
        instances = outputs["instances"]
        detected_class_indexes = instances.pred_classes
        prediction_boxes = instances.pred_boxes
        metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
        class_catalog = metadata.thing_classes

        #Thank you to: https://stackoverflow.com/a/68471952
        for idx, coordinates in enumerate(prediction_boxes):
            class_index = detected_class_indexes[idx]
            class_name = class_catalog[class_index]
            found_objs.append(class_name)
            print(class_name, coordinates)

        # Free the detection tensors promptly to keep RAM/VRAM usage down.
        del(outputs)
        del(instances)
        del(detected_class_indexes)
        del(prediction_boxes)
        del(metadata)
        del(class_catalog)
    finally:
        # Bug fix: always clear the busy flag, even if the predictor raises;
        # previously an exception left eyes_busy stuck True and the capture
        # loop would skip detection forever.
        eyes_busy = False

    if found_objs != []:
        new_name = "_".join(found_objs)
        old_name = file_name.split('/')[-1]
        full_new_name = os.path.dirname(file_name)
        full_new_name = f"{full_new_name}/{new_name}_{old_name}"
        os.rename(file_name, full_new_name)
        print(found_objs)
    return(found_objs)

def main_cam_loop(cam_name, dev_name):
    """Capture loop for one camera: motion detection, recording, streaming.

    Runs until the module-wide ``exit`` flag is set. Motion is detected by
    differencing consecutive frames; sustained motion opens an "event"
    directory under events/<cam_name>/, records annotated video there, and
    hands cropped snapshots of the moving region to ``what_is_this`` on a
    background thread.

    Args:
        cam_name: Human-readable camera name (used for event folder names).
        dev_name: OpenCV capture source (device index or URL, e.g. RTSP).
    """
    global exit
    global port
    
    # Each camera thread gets its own MJPEG streamer on the next free port.
    streamer = Streamer(port, require_login)
    print(f"Sarting {cam_name}, Dev: {dev_name}, on port: {port}")
    port = port + 1
    
    cap = cv2.VideoCapture(dev_name)
    if cap.isOpened():
        ret,frame = cap.read()
    else:
        ret =False
    if not ret:
        print(f"Error with {dev_name} {cam_name}")
        return
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    # Motion detection works on the difference of two consecutive frames.
    ret,frame1 = cap.read()
    ret,frame2 = cap.read()

    # back_log: detection threads spawned for this camera.
    back_log = []
    # move_frames: consecutive frames with significant motion.
    move_frames = 0
    # event_path == "" means no event is currently being recorded.
    event_path = ""
    sleepy_frames = 0 # used to keep track of number of non moving frames
    # video_to_save: "" or an open cv2.VideoWriter for the current event.
    video_to_save = ""
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    start_time = time.time()
    frame_count = 0
    fps = 1


    while not exit:
        ret,frame = cap.read()
        #frame = cv2.fastNlMeansDenoisingColored(frame,None,10,10,7,21)
        # Measure per-frame FPS so recorded video plays back at real speed.
        time_now = time.time()
        time_taken = time_now - start_time
        start_time = time_now
        fps = 1/time_taken
        print(f"{cam_name} FPS: {fps}")
        # Pipeline: abs-diff -> grey -> blur -> threshold -> dilate -> contours.
        d=cv2.absdiff(frame1,frame2)
        try:
            grey=cv2.cvtColor(d,cv2.COLOR_BGR2GRAY)
        except Exception:
            print("Error pulling frame from...")
            continue
        blur =cv2.GaussianBlur(grey,(5,5),0)
        ret,th=cv2.threshold(blur,20,255,cv2.THRESH_BINARY)
        dilated=cv2.dilate(th,np.ones((3,3),np.uint8),iterations=3)
        #img,c,h=cv2.findContours(dilated,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        
        
        #Check if frame is moving
        # A storm of contours usually means a lighting change or sensor
        # noise, not real motion — discard the whole frame's contours.
        if len(contours) > too_many_motion_points:
            count = len(contours)
            print(f"Warning, Too much moving! {count}")
            contours = []
        found_moving = False
        for i in contours:
            if cv2.contourArea(i) < min_move_area:
                continue
            (x, y, w, h) = cv2.boundingRect(i)
            size = w * h
            
            if size > triger_move_area:
                found_moving = True
                if move_frames > num_frames_needed_to_trigger:
                    ts = time.strftime('%l:%M%p_(+%S)_on_%b_%d_%Y')
                    local_event_base = f"{event_base}/{cam_name}"
                    if not os.path.isdir(local_event_base):
                        os.mkdir(local_event_base)
                    #ts = f"{ }_at_{ts}"
                    # First triggered frame: create the event folder and open
                    # the event video writer at the measured FPS.
                    if event_path == "":
                        event_path = f"{local_event_base}/{ts}/"
                        print(f"New event: {ts}")
                        os.mkdir(event_path)
                        #video_to_save = cv2.VideoWriter(f'{script_dir}/{ts}/event.avi', -1, 20.0, (1920,1080))
                        height, width = frame1.shape[:2]
                        shape = (width, height)
                        video_to_save = cv2.VideoWriter(f'{local_event_base}/{ts}/event.mp4', fourcc, fps, shape)
                    # Red box: motion that is actively being recorded.
                    cv2.rectangle(frame1, (x-boarder, y-boarder), (x + w+boarder, y + h+boarder), (0, 0, 255), 2)

                    #expand boarder around moving item
                    # Clamp the expanded crop rectangle to the frame bounds.
                    x_start = x-boarder
                    if x_start < 0:
                        x_start = 0
                    x_end = x + w+boarder
                    if x_end > frame1.shape[1]:
                        x_end = frame1.shape[1]
                    y_start = y-boarder
                    if y_start < 0:
                        y_start = 0
                    y_end = y + h+boarder
                    if y_end > frame1.shape[0]:
                        y_end = frame1.shape[0]

                    
                    #crop = frame1[50:100, 1620:1920]
                    #print("Shape of the cut", crop.shape)
                    
                    #If eye thread is not busy, check what is moving
                    #TODO If busy for too long do it anyway. 
                    if not eyes_busy:
                        crop = frame[y_start:y_end, x_start:x_end]
                        file_name = f"{event_path}{move_frames}.jpg"
                        cv2.imwrite(file_name, crop)
                        #cv2.imshow("cropped", crop)
                        back_log.append(Thread(target = what_is_this, args=[file_name]))
                        back_log[-1].setDaemon(True)
                        back_log[-1].start()
                    else:
                        print("Warning, skiping process")
                #Movment but not a triger yet
                else:
                    # Blue box: motion seen, but not sustained long enough yet.
                    cv2.rectangle(frame1, (x-boarder, y-boarder),  (x + w+boarder, y + h+boarder), (255, 0, 0), 2)

        #Save frame to video
        print(video_to_save)
        if video_to_save != "":
            video_to_save.write(frame1)
        
        # Push the annotated frame to the MJPEG streamer (lazy start).
        streamer.update_frame(frame1)
        if not streamer.is_streaming:
            streamer.start_streaming()
        
        if found_moving:
            move_frames = move_frames + 1
        else:
            #End of event
            # Enough quiet frames: close out the event and reset counters.
            if sleepy_frames > num_frames_needed_to_end_trigger:
                move_frames = 0
                event_path = ""
                sleepy_frames = 0
                if video_to_save != "":
                    video_to_save.release()
                    video_to_save = ""
            else:
                sleepy_frames = sleepy_frames + 1
        
        cv2.drawContours(frame1,contours,-1,(0,255,255),2)
        #cv2.imshow("inter",frame1)
        
        # NOTE(review): cv2.waitKey needs an open HighGUI window; with the
        # imshow calls disabled this likely never reads ESC — confirm.
        if cv2.waitKey(40) == 27:
            exit = True
        frame1 = frame2
        ret,frame2= cap.read()
    cv2.destroyAllWindows()
    #VideoFileOutput.release()
    cap.release()

# Spawn one capture/detection thread per configured camera.
cam_threads = []
for cap_dev in cam_devices:
    print(f"Starting {cap_dev} {cam_devices[cap_dev]}")
    cam_threads.append(Thread(target=main_cam_loop, args=[cap_dev, cam_devices[cap_dev]]))
    # Thread.setDaemon() is deprecated (Python 3.10+); set the attribute.
    cam_threads[-1].daemon = True
    cam_threads[-1].start()
    # Stagger start-up so the cameras don't all initialise at once.
    time.sleep(3)

def on_exit(signal_received, frame):
    # SIGINT handler: flip the module-wide exit flag so every camera loop
    # (and the wait loop below) finishes its current iteration and stops.
    global exit
    exit = True
    # Handle any cleanup here
    print('SIGINT or CTRL-C detected. Exiting gracefully')

# Keep the main thread alive (the camera threads are daemons) until the
# exit flag is set by the handler above or by a camera loop.
signal(SIGINT, on_exit)
while not exit:
    time.sleep(1)

A security sensors/settings.txt => security sensors/settings.txt +5 -0
@@ 0,0 1,5 @@
#Name~Device
Door~rtsp://10.250.10.225:554/Streaming/Channes/ID/?transportmode=unicast
#Room~0
#Window~4
#Shed~2

A security sensors/setup.txt => security sensors/setup.txt +22 -0
@@ 0,0 1,22 @@
https://github.com/facebookresearch/detectron2/blob/main/INSTALL.md
sudo pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
sudo python3 -m pip install 'git+https://github.com/facebookresearch/detectron2.git'

#sudo pip3 install cython; pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'

#Old
Install pip2
#CPU
sudo pip2 install "tensorflow<2.0"
sudo pip2 install luminoth

#GPU
sudo pip2 install "tensorflow-gpu<2.0"
sudo pip2 install luminoth[tf-gpu]

lumi checkpoint refresh
lumi checkpoint list
lumi checkpoint download accurate
lumi checkpoint download fast

lumi predict --checkpoint=fast <Img>

A security sensors/startup.sh => security sensors/startup.sh +5 -0
@@ 0,0 1,5 @@
#!/bin/bash
# Launch the security-sensor service from its own directory.
#Limit memory use
cd "$(dirname "$0")"
# NOTE(review): LRU_CACHE_CAPACITY is presumably read by the C++ runtime
# (libstdc++/PyTorch) to cap per-thread allocation caching — confirm.
export LRU_CACHE_CAPACITY=1
# Preload jemalloc to reduce heap fragmentation from the torch workload.
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 python3 ./sensors.py

A security sensors/templates/fail.html => security sensors/templates/fail.html +15 -0
@@ 0,0 1,15 @@
<!doctype html>
<html>

<body>
    <div id="reason" style="display: none;">
        {{reason}}
    </div>

    <script>
        alert(document.getElementById("reason").innerText);
        location.href = "../change%20password";
    </script>
</body>

</html>
\ No newline at end of file

A security sensors/templates/form.html => security sensors/templates/form.html +33 -0
@@ 0,0 1,33 @@
<!doctype html>
<html>

<body>
    <center>
        <form action="../change%20password%20result" method="POST">
            <table>
                <tr>
                    <th>Username:</th>
                    <th><input type="text" name="username" /></th>
                </tr>
                <tr>
                    <th>Old Password</th>
                    <th><input type="password" name="old_pw" /></th>
                </tr>
                <tr>
                    <th>New Password</th>
                    <th><input type="password" name="pw" /></th>
                </tr>
                <tr>
                    <th>(Confirm)</th>
                    <th><input type="password" name="pw_conf" /></th>
                </tr>
                <tr>
                    <th><input type="submit" value="Change Password" /></th>
                    <th></th>
                </tr>
            </table>
        </form>
    </center>
</body>

</html>
\ No newline at end of file

A security sensors/templates/index.html => security sensors/templates/index.html +9 -0
@@ 0,0 1,9 @@
<html>
  <head>
    <title>Video Streaming Demonstration</title>
  </head>
  <body>
    <h1>Video Streaming Demonstration</h1>
    <img src="{{ url_for('video_feed') }}">
  </body>
</html>

A security sensors/templates/pass.html => security sensors/templates/pass.html +15 -0
@@ 0,0 1,15 @@
<!doctype html>
<html>

<body>
    <script>
        alert("Successfully changed password!");
        location.href = "..";
    </script>

    <p>
        {{reason}}
    </p>
</body>

</html>
\ No newline at end of file