You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
ff/ippigeon.py

161 lines
4.8 KiB
Python

import os
import sys
import subprocess
from multiprocessing import Process, Manager, Pool, TimeoutError, freeze_support, active_children
from sys import platform
from time import sleep
import uuid
import yaml
from util import find_data_file
from util import fprint
import taskbartool
import util
import netstat
import ssh
import auth
# Module-level shared state; the real values are created in __main__ (Manager
# proxies) and read by mainloop() and the pool callbacks below.
displaydata = None
settings = None
netdata_res = None
procdata_res = None
killme = None
datafile = ""
#print(datafile)
config = None
interval = 10
# Platform flags derived from sys.platform.
win32 = platform == "win32"
linux = platform == "linux" or platform == "linux2"
macos = platform == "darwin"
# Get unique system values
if win32:
    # MAC-address-derived machine id used as the per-host data file prefix.
    # NOTE(review): sysid is only assigned on Windows but is read
    # unconditionally in mainloop() — confirm the non-Windows path.
    sysid = hex(uuid.getnode())
    datafile += sysid
    datafile += "gendata.csv"
    # Python is running as Administrator (so netstat can get filename, to block, etc),
    # so we use this to see who is actually logged in
    # it's very hacky
    startupinfo = subprocess.STARTUPINFO()
    #if not getattr(sys, "frozen", False):
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # hide powershell window
    # WMIC prints a header line ("UserName") then "DOMAIN\user"; keep the last line.
    res = subprocess.check_output(["WMIC", "ComputerSystem", "GET", "UserName"], universal_newlines=True, startupinfo=startupinfo)
    _, username = res.strip().rsplit("\n", 1)
    # NOTE(review): rsplit("\\", 1) yields (domain, user) — the names
    # userid/sysdom appear swapped relative to that order; confirm intent.
    userid, sysdom = username.rsplit("\\", 1)
def netstat_done(res):
    """Pool callback: a raw netstat capture finished; queue the parse step.

    res -- result of netstat.start, forwarded to netstat.process; when the
    parse completes, process_done() uploads the output.
    """
    # Bug fix: without this declaration, procdata_res was a local name and the
    # AsyncResult was discarded immediately; mainloop() declares the same name
    # global, so the module-level binding is clearly the intended one.
    global procdata_res
    fprint("netstat done, processing")
    procdata_res = pool.apply_async(netstat.process, (res,), callback=process_done)
    #netstat.process(res)
def process_done(res):
    """Pool callback: parsed netstat data is ready; queue the SFTP upload.

    res -- parse result (unused here; the upload reads the data file).
    """
    # Bug fix: declare the module-level name so the AsyncResult is kept,
    # matching the global declaration in mainloop().
    global procdata_res
    fprint("uploading to sftp...")
    #ssh.sftp_send_data(res, config, datafile)
    procdata_res = pool.apply_async(ssh.sftp_send_data, (config, datafile, 'send'))
def login_done(res):
    """Pool callback for auth.login: log the outcome and surface failures.

    res -- truthy login result on success; falsy on failure, in which case
    the shared settings dict gets a message for the UI process.
    """
    if res:
        fprint("Login result in main: " + str(res))
    else:
        fprint("Login failure")
        settings["message"] = "Login failure"
def killall():
    """Forcibly terminate every child process, then this process itself."""
    for child in active_children():
        child.kill()
    fprint("Every child has been killed")
    os.kill(os.getpid(), 9)  # dirty kill of self
def mainloop(pool):
    """One ~50 ms tick of the scheduler: poll shutdown flag, start a netstat
    capture every `interval` seconds, and service UI login requests.

    pool -- multiprocessing.Pool used for all background jobs.
    """
    # worker pool: netstat, netstat cleanup, upload, download, ui tasks
    global counter
    global netdata_res
    global procdata_res
    global rawdata
    global killme
    #print(killme)
    # The UI process sets killme.value nonzero to request full shutdown.
    if killme.value > 0:
        #print("killing")
        killall()
    #print(res.get(timeout=1))
    if counter == 0: # runs every INTERVAL
        fprint("start loop")
        # Only start a new capture when the previous AsyncResult has finished.
        if netdata_res is None or netdata_res.ready():
            #rawdata = netdata_res.get()
            #procdata_res = pool.apply_async(process_netstat, (rawdata))
            fprint("netstat starting")
            netdata_res = pool.apply_async(netstat.start, callback=netstat_done)
            #fprint(netdata_res.successful())
    # runs every 50ms
    if settings["login"] == True:
        # UI requested a login attempt; run it in the pool and clear the flag.
        login_res = pool.apply_async(auth.login, (config, settings["username"], settings["password"], sysid), callback=login_done)
        #fprint(auth.login(config, settings["username"], settings["password"], sysid))
        settings["login"] = False
    # interval / (interval * 20.0) is always 0.05 s, independent of interval.
    sleep(interval / (interval * 20.0))
    counter += 1
    if counter == interval * 20:
        counter = 0
class Logger(object):
    """Tee-style stream: every write goes to a log file and the original stdout.

    Installed as sys.stdout/sys.stderr in __main__ so all output is captured
    to output.log while still reaching the console when one exists.
    """

    def __init__(self, filename="output.log"):
        # Append so repeated runs accumulate in a single log file.
        self.log = open(filename, "a")
        # Capture the real stream before sys.stdout is replaced by this object.
        self.terminal = sys.stdout

    def write(self, message):
        self.log.write(message)
        try:
            # The console can be absent or broken (e.g. frozen windowed EXE).
            self.terminal.write(message)
        except Exception:
            pass

    def flush(self):
        # Bug fix: flush() previously executed print("") — which re-entered
        # write() and injected a stray newline into both streams on every
        # flush — and never flushed the file buffer, risking lost log lines.
        self.log.flush()
        try:
            self.terminal.flush()
        except Exception:
            pass
if __name__ == '__main__':
    freeze_support() # required if packaged into single EXE
    # create manager to share data to me, background, foreground
    # create worker pool
    # Mirror all stdout/stderr into output.log (console may be hidden).
    sys.stdout = Logger(filename=find_data_file("output.log"))
    sys.stderr = Logger(filename=find_data_file("output.log"))
    with Pool(processes=5) as pool:
        with Manager() as manager:
            with open(find_data_file('config.yml'), 'r') as file:
                #global config
                config = yaml.safe_load(file)
                #print(config['sftp']['host'])
                interval = config['core']['interval']
            # Manager-backed proxies shared with the UI process.
            displaydata = manager.list(range(2)) # data to be printed
            settings = manager.dict() # configuration
            settings["login"] = False
            settings["loggedin"] = False
            # Shared shutdown flag; the UI sets it nonzero to request exit.
            killme = manager.Value('d', 0)
            #killme = False
            # launch background UI app as process
            p = Process(target=taskbartool.background, args=(displaydata,settings,killme))
            p.start()
            #p.join() # not a foreground job, so let's not join it
            keeprunning = True
            # initial setup
            #netdata_res = pool.apply_async(netstat, callback=netstat_done)
            # launch loop - non-blocking!
            counter = 0
            while(keeprunning):
                mainloop(pool)