"""
This module implements a Wasserstein GAN with gradient penalty (WGAN-GP)
used to train on real data and generate synthetic candidates. It relies on
the TensorFlow 1.x compatibility API (tf.compat.v1).
"""
import os
import time

import numpy as np
import tensorflow as tf

from data.params import SYS_ARGS
self.BATCHSIZE_PER_GPU = PROPOSED_BATCH_PER_GPU  # batch size assigned to each GPU
# The suffix identifies the data context; an earlier version derived it
# from the column name(s):
# suffix = "-".join(column) if isinstance(column, list) else column
suffix = self.CONTEXT  # self.get.suffix()
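# A minimal sketch of how the suffix might be used to name training
# artifacts; `self.log_dir` and the directory layout are assumptions,
# not confirmed by this excerpt.
train_dir = os.sep.join([self.log_dir, 'train', suffix])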
"""
Apply batch normalization to a network layer.

:inputs    input layer of the neural network
:name      name of the variable scope
:labels    labels (attributes not synthesized), None by default
:n_labels  number of labels, None by default
"""
inputs = args['inputs']
name = args['name']
labels = args.get('labels')
n_labels = args.get('n_labels')
# Index 0 for the generator and 1 otherwise (i.e., the discriminator),
# presumably selecting per-network normalization parameters.
shift = [0] if self.__class__.__name__.lower() == 'generator' else [1]
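# A minimal sketch of a label-conditioned batch normalization consistent
# with the arguments unpacked above; the helper name and the use of
# tf.nn.moments / embedding lookups are assumptions, not the project's
# confirmed implementation.
def _conditional_batchnorm(inputs, name, labels=None, n_labels=None):
    with tf.compat.v1.variable_scope(name):
        # Per-feature statistics over the batch dimension
        mean, variance = tf.nn.moments(x=inputs, axes=[0])
        if labels is None:
            return tf.nn.batch_normalization(inputs, mean, variance,
                                             offset=None, scale=None,
                                             variance_epsilon=1e-6)
        # One (offset, scale) row per label, selected per sample
        dim = int(inputs.shape[-1])
        offset = tf.compat.v1.get_variable('offset', [n_labels, dim],
                                           initializer=tf.zeros_initializer())
        scale = tf.compat.v1.get_variable('scale', [n_labels, dim],
                                          initializer=tf.ones_initializer())
        idx = tf.argmax(labels, axis=1)
        return tf.nn.batch_normalization(inputs, mean, variance,
                                         offset=tf.nn.embedding_lookup(offset, idx),
                                         scale=tf.nn.embedding_lookup(scale, idx),
                                         variance_epsilon=1e-6)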
"""
Build the network that generates the synthetic candidates.

:inputs  matrix of input data for the network
:dim     dimension of the input (defaults to self.Z_DIM)
:label   label tensor used to condition the generated output
"""
x = args['inputs']
tmp_dim = args.get('dim', self.Z_DIM)
label = args['label']
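# A minimal sketch of the kind of fully connected stack this builder might
# assemble from `x`; the depth, widths, and use of tf.compat.v1.layers.dense
# are assumptions, not the project's confirmed architecture.
for i in range(3):
    x = tf.compat.v1.layers.dense(x, units=tmp_dim, activation=tf.nn.relu,
                                  name='layer_%d' % i)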
# all_regs = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
all_regs = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
# Wasserstein distance estimate: E[D(fake)] - E[D(real)]
w_distance = -tf.reduce_mean(y_hat_real) + tf.reduce_mean(y_hat_fake)
# WGAN-GP critic loss: distance + gradient penalty (lambda = 10) + regularizers
loss = w_distance + 10 * gradient_penalty + sum(all_regs)
# tf.add_to_collection('dlosses', loss)
tf.compat.v1.add_to_collection('dlosses', loss)
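# A minimal sketch of how `gradient_penalty` above is typically computed
# for WGAN-GP (Gulrajani et al., 2017); the variable names and the
# `discriminator` callable are assumptions, not this project's exact code.
alpha = tf.random.uniform([batch_size, 1], 0.0, 1.0)    # per-sample mixing factor
x_hat = alpha * x_real + (1 - alpha) * x_fake           # interpolate real/fake rows
grads = tf.gradients(discriminator(x_hat), [x_hat])[0]  # critic gradient w.r.t. input
slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=1))
gradient_penalty = tf.reduce_mean((slopes - 1.0) ** 2)  # penalize norm deviation from 1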
return w_distance, loss
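# A minimal sketch of how the 'dlosses' collection populated above could be
# aggregated into the `total_loss` returned below; the exact aggregation is
# an assumption.
losses = tf.compat.v1.get_collection('dlosses')
total_loss = tf.add_n(losses, name='total_loss')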
return total_loss, w
def input_fn(self):
    """
    Build the input pipeline: create a placeholder over the real data from
    which training batches are drawn.
    """
    features_placeholder = tf.compat.v1.placeholder(shape=self._REAL.shape, dtype=tf.float32)
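    # A minimal sketch of how the placeholder might feed a tf.data pipeline;
    # the shuffle buffer and the returned values are assumptions, not the
    # project's confirmed code.
    dataset = tf.data.Dataset.from_tensor_slices(features_placeholder)
    dataset = dataset.repeat().shuffle(buffer_size=10000).batch(self.BATCHSIZE_PER_GPU)
    iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
    return iterator, features_placeholder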
duration = time.time() - start_time  # wall-clock time for the epoch
assert not np.isnan(w_sum), 'Model diverged with loss = NaN'
format_str = 'epoch: %d, w_distance = %f (%.1f)'
print(format_str % (epoch, -w_sum / (self.STEPS_PER_EPOCH * 2), duration))
# row = {"module": "gan-train", "action": "epochs", "input": {"logs": logs}}  # ,"model":pickle.dump(sess)}
# self.logger.write(row)
# self.logs['epochs'] += logs
#
# @TODO:
# Upload the files in the checkpoint; this would allow the learnt model
# to be portable to another system.
self.oROW_COUNT = self.ROW_COUNT  # keep a copy of the original row count
# candidates.append(np.round(_matrix).astype(np.int64))
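# A minimal sketch of the sampling loop the commented line above belongs to;
# `sess`, `fake`, and `num_runs` are illustrative names, not the project's.
candidates = []
for _ in range(num_runs):
    _matrix = sess.run(fake)  # one batch of synthetic rows from the generator
    candidates.append(np.round(_matrix).astype(np.int64))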