"""
import os
import json
import pickle
import numpy as np
import tensorflow as tf

else:
    self.mkdir(os.sep.join([self.log_dir, key]))

    "X_SPACE_SIZE": self.X_SPACE_SIZE,
    "D_STRUCTURE": self.D_STRUCTURE,
    "G_STRUCTURE": self.G_STRUCTURE,
    "NUM_GPUS": self.NUM_GPUS,
    "NUM_LABELS": self.NUM_LABELS,
    "MAX_EPOCHS": self.MAX_EPOCHS,
    "ROW_COUNT": self.ROW_COUNT
}

if args and 'key' in args and 'value' in args:
    key = args['key']
    value = args['value']
    _object[key] = value
# suffix = "-".join(self.column) if isinstance(self.column,list) else self.column
suffix = self.get.suffix()
_name = os.sep.join([self.out_dir, 'meta-' + suffix])
# persist the run metadata as JSON so it lives alongside the model artifacts
with open(_name + '.json', 'w') as f:
    f.write(json.dumps(_object))
return _object
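
# Assumed usage of this metadata writer (its def line and name are elided from
# this fragment; log_meta is a guess): call it once before training so a run
# can be reproduced later, e.g.
#   meta = self.log_meta()                      # writes meta-<suffix>.json
#   meta = self.log_meta(key='seed', value=42)  # attach an extra field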

def mkdir(self, path):
    # create a single directory level if it does not already exist
    if not os.path.exists(path):
        os.mkdir(path)
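
# os.mkdir creates only one directory level, and the exists()/mkdir pair can
# race with other processes; os.makedirs(path, exist_ok=True) is the
# recursive, race-safe alternative for nested paths such as the
# log_dir/key directories above.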

bias = self.get.variables(name='b_' + str(i), shape=[1])
# affine layer: y = x @ kernel + bias
y = tf.add(tf.matmul(x, kernel), bias)
return y
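
# A minimal sketch of how layers built this way could stack into the network
# bodies described by G_STRUCTURE/D_STRUCTURE (illustrative; the loop, the
# layer naming, and the ReLU choice are assumptions, not shown in this fragment):
#   x = inputs
#   for i, units in enumerate(self.G_STRUCTURE):
#       kernel = self.get.variables(name='W_' + str(i), shape=[int(x.shape[-1]), units])
#       bias = self.get.variables(name='b_' + str(i), shape=[1])
#       x = tf.nn.relu(tf.add(tf.matmul(x, kernel), bias))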

# print([" *** ", self.BATCHSIZE_PER_GPU])
if stage == 'D':
    w, loss = self.discriminator.loss(real=real, fake=fake, label=label)
    flag = 'dlosses'
    losses = tf.compat.v1.get_collection('dlosses', scope)
else:
    w, loss = self.generator.loss(fake=fake, label=label)
    flag = 'glosses'
    losses = tf.compat.v1.get_collection('glosses', scope)
# losses = tf.compat.v1.get_collection(flag, scope)

# sum every loss registered under this scope into a single scalar
total_loss = tf.add_n(losses, name='total_loss')

return total_loss, w
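
# Assumed calling pattern (the per-tower scope handling and the enclosing
# method name are elided from this fragment): the same routine serves both
# stages, with `w` carrying the discriminator's Wasserstein distance estimate.
#   d_total, w_distance = self.loss(stage='D', real=real, fake=fake, label=label, scope=scope)
#   g_total, _ = self.loss(stage='G', fake=fake, label=label, scope=scope)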

def input_fn(self):
    """
    Build the input pipeline: create placeholders for the real features and
    labels and return them together with a dataset iterator, so the caller
    can feed numpy arrays in at session time.
    """
    features_placeholder = tf.compat.v1.placeholder(shape=self._REAL.shape, dtype=tf.float32)
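    # Plausible continuation (assumed, not shown in this fragment): pair the
    # features with a labels placeholder, batch per GPU through tf.data, and
    # hand back an initializable iterator, matching the caller below.
    labels_placeholder = tf.compat.v1.placeholder(shape=self._LABEL.shape, dtype=tf.float32)
    dataset = tf.data.Dataset.from_tensor_slices((features_placeholder, labels_placeholder))
    dataset = dataset.repeat().batch(self.BATCHSIZE_PER_GPU).prefetch(1)
    iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
    return iterator, features_placeholder, labels_placeholder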

iterator, features_placeholder, labels_placeholder = self.input_fn()

train_d, w_distance, iterator_d, features_placeholder_d, labels_placeholder_d = self.network(stage='D', opt=opt_d)
train_g, _, iterator_g, features_placeholder_g, labels_placeholder_g = self.network(stage='G', opt=opt_g)
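
# Both training ops come from the same graph-building routine: stage 'D'
# wires the discriminator update (also exposing the Wasserstein distance
# estimate), stage 'G' the generator update, each with its own iterator
# and placeholders.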

saver = tf.compat.v1.train.Saver()
init = tf.compat.v1.global_variables_initializer()
logs = []
with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:
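    # Assumed shape of the training loop that follows (elided here; the names
    # real_data/labels are hypothetical): initialize variables and the dataset
    # iterators, then alternate discriminator and generator steps, e.g.
    #   sess.run(init)
    #   sess.run(iterator_d.initializer, feed_dict={features_placeholder_d: real_data, labels_placeholder_d: labels})
    #   for epoch in range(1, self.MAX_EPOCHS + 1):
    #       _, w = sess.run([train_d, w_distance])
    #       sess.run(train_g)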

model_dir = os.sep.join([self.train_dir, suffix + '-' + str(self.MAX_EPOCHS)])
demo = self._LABEL  # np.zeros([self.ROW_COUNT, self.NUM_LABELS])  # args['demo']
tf.compat.v1.reset_default_graph()
# latent noise for the generator, plus the one-hot label placeholder it is conditioned on
z = tf.random.normal(shape=[self.BATCHSIZE_PER_GPU, self.Z_DIM])
y = tf.compat.v1.placeholder(shape=[self.BATCHSIZE_PER_GPU, self.NUM_LABELS], dtype=tf.int32)
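
# Assumed continuation for sampling (based on the saver and model_dir built
# above; the generator wiring and method name are hypothetical):
#   fake = self.generator.network(inputs=z, label=y)
#   with tf.compat.v1.Session() as sess:
#       saver.restore(sess, model_dir)
#       f = sess.run(fake, feed_dict={y: label_batch})  # label_batch: one-hot label rows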

# Dead experiment kept for reference: for each pair of label columns (m, n),
# build one-hot label batches, sample fake rows from the generator, and save
# the rounded output per pair.
# for m in range(2):
#     for n in range(2, self.NUM_LABELS):
#         idx1 = (demo[:, m] == 1)
#         idx2 = (demo[:, n] == 1)
#         idx = [idx1[j] and idx2[j] for j in range(len(idx1))]
#         num = np.sum(idx)
#         print("___________________list__")
#         print(idx1)
#         print(idx2)
#         print(idx)
#         print(num)
#         print("_____________________")
#         nbatch = int(np.ceil(num / self.BATCHSIZE_PER_GPU))
#         label_input = np.zeros((nbatch * self.BATCHSIZE_PER_GPU, self.NUM_LABELS))
#         label_input[:, n] = 1
#         label_input[:, m] = 1
#         output = []
#         for i in range(nbatch):
#             f = sess.run(fake, feed_dict={y: label_input[i * self.BATCHSIZE_PER_GPU:(i + 1) * self.BATCHSIZE_PER_GPU]})
#             output.extend(np.round(f))
#         output = np.array(output)[:num]
#         print([m, n, output])
#         np.save(self.out_dir + str(m) + str(n), output)