"""
import pickle
else:
    # Metadata describing this training run (hyper-parameters and data shape)
    _object = {
        "MAX_EPOCHS":self.MAX_EPOCHS,
        "ROW_COUNT":self.ROW_COUNT
    }
# Allow the caller to attach an extra key/value pair to the metadata
if args and 'key' in args and 'value' in args :
    key = args['key']
    value = args['value']
    _object[key] = value
# suffix = "-".join(self.column) if isinstance(self.column,list) else self.column
suffix = self.get.suffix()
_name = os.sep.join([self.out_dir,'meta-'+suffix])

# Persist the metadata as JSON next to the model outputs
with open(_name+'.json','w') as f:
    f.write(json.dumps(_object))
return _object
def mkdir (self,path):
    # Create the directory only if it does not already exist
    if not os.path.exists(path) :
        os.mkdir(path)
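# Initialize the shared configuration (dimensions, label count, output paths) held by the GNet base class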
GNet.__init__(self,**args)
# Hidden layer i: label-conditioned normalization ('cbn') of the affine transform,
# a tanh activation, and a residual connection back onto x
h1 = self.normalize(inputs=tf.matmul(x, kernel), name='cbn' + str(i),
                    labels=label, n_labels=self.NUM_LABELS)
h2 = tf.nn.tanh(h1)
x = x + h2
#
# Output layer: project from the hidden width (Z_DIM) back to the data space
# (X_SPACE_SIZE) and squash the result into [0,1] with a sigmoid
#
kernel = self.get.variables(name='W_' + str(i+1), shape=[self.Z_DIM, self.X_SPACE_SIZE])
bias = self.get.variables(name='b_' + str(i+1), shape=[self.X_SPACE_SIZE])
x = tf.nn.sigmoid(tf.add(tf.matmul(x, kernel), bias))
return x
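# Gather the discriminator losses registered under the 'dlosses' collection for this scope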
losses = tf.compat.v1.get_collection('dlosses', scope)
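# Multi-GPU tower pattern: replicate the graph once per GPU, each copy under its own name scope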
for i in range(self.NUM_GPUS):
    with tf.device('/gpu:%d' % i):
        with tf.name_scope('%s_%d' % ('TOWER', i)) as scope:
# Save this epoch's checkpoint under <train_dir>/<suffix>
suffix = self.get.suffix()
_name = os.sep.join([self.train_dir,suffix])
# saver.save(sess, self.train_dir, write_meta_graph=False, global_step=epoch)
saver.save(sess, _name, write_meta_graph=False, global_step=epoch)
#
#
if self.logger :
    # Hand the per-epoch logs to the logger (the session itself is not serialized)
    row = {"logs":logs} #,"model":pickle.dump(sess)}
    self.logger.write(row)
#
# @TODO:
# We should upload the files in the checkpoint
# This would allow the learnt model to be portable to another system
#
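# A minimal sketch of that idea (an assumption, not the implemented behaviour: it
# presumes the checkpoint files sit directly under self.train_dir and that
# self.logger.write accepts arbitrary dict rows):
#
#   import base64
#   for name in os.listdir(self.train_dir):
#       with open(os.sep.join([self.train_dir, name]), 'rb') as fp:
#           payload = base64.b64encode(fp.read()).decode('utf-8')
#       self.logger.write({"checkpoint": name, "data": payload})
#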
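# Clear the default graph so a subsequent run starts from a clean slate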
tf.compat.v1.reset_default_graph()
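# Keep candidate frames that pass the validity check; stop once the N-th valid
# candidate is found or the candidate budget is exhausted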
if p:
    found.append(df)
    if len(found) == NTH_VALID_CANDIDATE or i == CANDIDATE_COUNT:
        break
    else:
        continue
# i = df.T.index.astype(np.int32) #-- These are numeric pseudonyms
# df = (i * df).sum(axis=1)
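# Load the raw training data from the path supplied on the command line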
df = pd.read_csv(SYS_ARGS['raw-data'])
# cols = SYS_ARGS['column']
# _map,_df = (Binary()).Export(df)
# i = np.arange(_map[column]['start'],_map[column]['end'])
max_epochs = np.int32(SYS_ARGS['max_epochs']) if 'max_epochs' in SYS_ARGS else 10
# REAL = _df[:,i]
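# One-hot encode the target column (REAL) and the conditioning column (LABEL) as float32 matrices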
REAL = pd.get_dummies(df[column]).astype(np.float32).values
LABEL = pd.get_dummies(df[column_id]).astype(np.float32).values
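# Build the trainer with the encoded data and run training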
trainer = Train(context=context,max_epochs=max_epochs,real=REAL,label=LABEL,column=column,column_id=column_id)
trainer.apply()