@@ -20,6 +20,9 @@ class analytics :
         self.cache['key'] = args['key']
         self.cache['name'] = self.__class__.__name__
         self.init()
+    def get_formatted_date(self,row):
+        m = {1:"Jan",2:"Feb",3:"Mar",4:"Apr",5:"May",6:"Jun",7:"Jul",8:"Aug",9:"Sep",10:"Oct",11:"Nov",12:"Dec"}
+        return "-".join([m[row['month']],str(row['day']),str(row['year'])]) +" "+ " ".join([str(row['hour']),'h :',str(row['minute']),'min' ])
     def init(self):
         """
             Store logs as-is per node, i.e. this makes it possible to see the latest pull
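
For reference, a minimal sketch of what the new get_formatted_date helper returns; the sample date dict below is hypothetical, but its keys match what the patch reads from data[node]['date']:

row = {"month": 3, "day": 14, "year": 2024, "hour": 9, "minute": 5}   # hypothetical date payload
m = {1:"Jan",2:"Feb",3:"Mar",4:"Apr",5:"May",6:"Jun",7:"Jul",8:"Aug",9:"Sep",10:"Oct",11:"Nov",12:"Dec"}
# same join logic as the helper; prints "Mar-14-2024 9 h : 5 min"
print("-".join([m[row['month']], str(row['day']), str(row['year'])]) + " " + " ".join([str(row['hour']), 'h :', str(row['minute']), 'min']))
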
@@ -83,7 +86,7 @@ class apps(analytics) :
         """
         analytics.init(self)
         grid = {"width":"100%","editing":False,"rowClass":"small"}
-        grid['fields'] = [{"name":"name","title":"Process","headercss":"small"},{"name":"cpu","title":"CPU Usage","headercss":"small","type":"number"},{"name":"mem","title":"RAM Usage","headercss":"small","type":"number"},{"name":"status","title":"Status","headercss":"small","align":"center"}]
+        grid['fields'] = [{"name":"name","title":"Process","headercss":"small"},{"name":"cpu","title":"CPU Usage","headercss":"small","type":"number"},{"name":"mem","title":"RAM Usage","headercss":"small","type":"number"},{"name":"status","title":"Status","headercss":"small","align":"center","width":"32px"}]
         self.set('grid',grid)

     def summary(self,data):
@@ -92,15 +95,17 @@ class apps(analytics) :
             In terms of {crash,idle,running} counts
         """
         r = []

         for node in data :
             logs = data[node]['logs']
             date = data[node]['date']['long']
+            formatted_date = self.get_formatted_date(data[node]['date'])
             df = pd.DataFrame(logs)
             df = df[df.name.str.contains('other',na=False)==False]
             crash = df.status.str.contains('X').sum()
             idle = df.status.str.contains('S').sum()
             running = df.shape[0] - crash - idle
-            r.append({"date":date,"node":node,"running":running,"idle":idle,"crash":crash})
+            r.append({"date":date,"node":node,"running":running,"idle":idle,"crash":crash,"formatted_date":formatted_date})
         return r

         # logs = pd.DataFrame(self.get('logs'))
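
For context, a minimal sketch of the counting rule summary() applies per node, run on made-up log rows; the 'X'/'S' status flags and the 'other' name filter come from the patch itself:

import pandas as pd

logs = [{"name": "api", "status": "S"},     # sleeping  -> counted as idle
        {"name": "worker", "status": "X"},  # exited    -> counted as crash
        {"name": "other", "status": "R"},   # dropped by the 'other' filter
        {"name": "db", "status": "R"}]      # remainder -> counted as running
df = pd.DataFrame(logs)
df = df[df.name.str.contains('other', na=False) == False]
crash = df.status.str.contains('X').sum()    # 1
idle = df.status.str.contains('S').sum()     # 1
running = df.shape[0] - crash - idle         # 1
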
@@ -137,11 +142,12 @@ class apps(analytics) :
             X = [[other_df.cpu.sum(),watch_df.cpu.sum()],[other_df.mem.sum(),watch_df.mem.sum()]]

             date= data[node]['date']['long']
+            formatted_date = self.get_formatted_date(data[node]['date'])
             q.append({"node":node, "x":X,"labels":labels, "title":title,"series":series,"ylabel":ylabel})
             crash = watch_df.status.str.contains('X').sum()
             idle = watch_df.status.str.contains('S').sum()
             running = N - crash - idle
-            r.append( {"x":[crash,idle,running],"labels":['Crash','Idle','Running'],"title":"","date":date})
+            r.append( {"x":[crash,idle,running],"labels":['Crash','Idle','Running'],"title":"","date":date,"formatted_date":formatted_date})

         self.set("resource",q)
         self.set("status",r)
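
A rough sketch of the resource payload this hunk touches, using invented process rows; splitting other_df and watch_df on the 'other' name is an assumption here, since their actual definitions sit above the hunk:

import pandas as pd

df = pd.DataFrame([{"name": "api", "cpu": 2.5, "mem": 1.1},
                   {"name": "worker", "cpu": 1.0, "mem": 0.4},
                   {"name": "other", "cpu": 90.0, "mem": 60.0}])
watch_df = df[df.name.str.contains('other', na=False) == False]   # assumed split
other_df = df[df.name.str.contains('other', na=False)]            # assumed split
# one row per series, as in the chart payload above: [cpu sums, mem sums]
X = [[other_df.cpu.sum(), watch_df.cpu.sum()], [other_df.mem.sum(), watch_df.mem.sum()]]
# X -> [[90.0, 3.5], [60.0, 1.5]]
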
@@ -187,12 +193,16 @@ class folders(analytics):
         else:
             max_size = 0
         self.set('max_size',max_size)
         q = []
         for node in data :
             df = pd.DataFrame(data[node]['logs'])
             N = df.shape[0]
             df = pd.DataFrame(df.mean()[['size_in_kb','files','age_in_days']]).T

-            r.append({"node":node,"folders":N, "max_size":max_size,"size":np.round(df.size_in_kb.values[0]*.000001,2),"age":df.age_in_days.values[0].round(2),"files":df.files.values[0].round(2)})
+            formatted_date = self.get_formatted_date(data[node]['date'])
+            r.append({"node":node,"folders":N, "max_size":max_size,"max_folder_size":np.round(df.size_in_kb.max()*.000001,2),"avg_folder_size":np.round(df.size_in_kb.mean()*.000001,2),"age":df.age_in_days.values[0].round(2),"avg_files_folder":df.files.max().round(2),"formatted_date":formatted_date})
+            X = [[np.round(df.size_in_kb.mean()*.000001,2),np.round(df.size_in_kb.max()*.000001,2)],[df.files.mean().round(2),df.files.max().round(2)]]
+            q.append({"node":node, "x":X,"labels":['Average','Largest'], "title":"","series":['Size (MB)','Files'],"ylabel":""})
+        self.set("analysis",q)
         return r
         # for id in logs :
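
Finally, a small sketch of the average/largest pairs carried by the new per-node chart payload, with invented folder rows; the .000001 scaling and the rounding follow the lines above:

import numpy as np
import pandas as pd

df = pd.DataFrame([{"size_in_kb": 2048, "files": 10, "age_in_days": 3},
                   {"size_in_kb": 512000, "files": 40, "age_in_days": 9}])   # invented rows
# average vs. largest, sizes scaled from KB by the same .000001 factor used above
X = [[np.round(df.size_in_kb.mean() * .000001, 2), np.round(df.size_in_kb.max() * .000001, 2)],
     [df.files.mean().round(2), df.files.max().round(2)]]
# X -> [[0.26, 0.51], [25.0, 40]]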