change test set seed #7

Open: wants to merge 1 commit into base: master
8 changes: 4 additions & 4 deletions TSP/tsp_utils.py
@@ -120,10 +120,10 @@ def __init__(self,

self.n_nodes = args['n_nodes']
self.input_dim = args['input_dim']
self.input_data = tf.placeholder(tf.float32,\
self.input_data = tf.compat.v1.placeholder(tf.float32,\
shape=[None,self.n_nodes,args['input_dim']])
self.input_pnt = self.input_data
self.batch_size = tf.shape(self.input_data)[0]
self.batch_size = tf.shape(input=self.input_data)[0]

def reset(self,beam_width=1):
'''
@@ -198,6 +198,6 @@ def reward_func(sample_solution=None):
# get the reward based on the route lengths


route_lens_decoded = tf.reduce_sum(tf.pow(tf.reduce_sum(tf.pow(\
(sample_solution_tilted - sample_solution) ,2), 2) , .5), 0)
route_lens_decoded = tf.reduce_sum(input_tensor=tf.pow(tf.reduce_sum(input_tensor=tf.pow(\
(sample_solution_tilted - sample_solution) ,2), axis=2) , .5), axis=0)
return route_lens_decoded
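
Reviewer note: the two rewritten calls only swap the positional reduce axes for the explicit input_tensor=/axis= keywords that TF 2.x expects; the computed value is unchanged. A minimal eager-mode sketch with toy coordinates (tf.roll stands in for how sample_solution_tilted is built) checking that the expression still sums the Euclidean leg lengths of the decoded tour:

    import tensorflow as tf

    # toy tour: 3 decode steps, batch of 1, 2-D coordinates
    sample_solution = tf.constant([[[0.0, 0.0]],
                                   [[3.0, 4.0]],
                                   [[3.0, 0.0]]])              # [3, 1, 2]
    # rolled-by-one copy standing in for sample_solution_tilted
    sample_solution_tilted = tf.roll(sample_solution, shift=1, axis=0)

    route_lens_decoded = tf.reduce_sum(
        input_tensor=tf.pow(
            tf.reduce_sum(
                input_tensor=tf.pow(sample_solution_tilted - sample_solution, 2),
                axis=2),
            .5),
        axis=0)
    print(route_lens_decoded.numpy())    # [12.]: legs 5 + 4 + 3 of the closed toy tour
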
40 changes: 20 additions & 20 deletions VRP/vrp_attention.py
@@ -6,19 +6,19 @@ def __init__(self, dim, use_tanh=False, C=10,_name='Attention',_scope=''):
self.use_tanh = use_tanh
self._scope = _scope

with tf.variable_scope(_scope+_name):
with tf.compat.v1.variable_scope(_scope+_name):
# self.v: is a variable with shape [1 x dim]
self.v = tf.get_variable('v',[1,dim],
initializer=tf.contrib.layers.xavier_initializer())
self.v = tf.compat.v1.get_variable('v',[1,dim],
initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"))
self.v = tf.expand_dims(self.v,2)

self.emb_d = tf.layers.Conv1D(dim,1,_scope=_scope+_name+'/emb_d' ) #conv1d
self.emb_ld = tf.layers.Conv1D(dim,1,_scope=_scope+_name+'/emb_ld' ) #conv1d_2
self.emb_d = tf.compat.v1.layers.Conv1D(dim,1,_scope=_scope+_name+'/emb_d' ) #conv1d
self.emb_ld = tf.compat.v1.layers.Conv1D(dim,1,_scope=_scope+_name+'/emb_ld' ) #conv1d_2

self.project_d = tf.layers.Conv1D(dim,1,_scope=_scope+_name+'/proj_d' ) #conv1d_1
self.project_ld = tf.layers.Conv1D(dim,1,_scope=_scope+_name+'/proj_ld' ) #conv1d_3
self.project_query = tf.layers.Dense(dim,_scope=_scope+_name+'/proj_q' ) #
self.project_ref = tf.layers.Conv1D(dim,1,_scope=_scope+_name+'/proj_ref' ) #conv1d_4
self.project_d = tf.compat.v1.layers.Conv1D(dim,1,_scope=_scope+_name+'/proj_d' ) #conv1d_1
self.project_ld = tf.compat.v1.layers.Conv1D(dim,1,_scope=_scope+_name+'/proj_ld' ) #conv1d_3
self.project_query = tf.compat.v1.layers.Dense(dim,_scope=_scope+_name+'/proj_q' ) #
self.project_ref = tf.compat.v1.layers.Conv1D(dim,1,_scope=_scope+_name+'/proj_ref' ) #conv1d_4


self.C = C # tanh exploration parameter
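
Note: tf.contrib no longer exists in TF 2.x, so tf.contrib.layers.xavier_initializer() is replaced here by tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"), which matches the Glorot/Xavier uniform scheme of the removed initializer. A minimal sketch of the replacement under a compat variable scope (the 'demo'/'v_demo' names and the 128 width are illustrative, not from this repo):

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()

    # same Glorot/Xavier uniform range as the removed contrib initializer
    glorot = tf.compat.v1.keras.initializers.VarianceScaling(
        scale=1.0, mode="fan_avg", distribution="uniform")

    with tf.compat.v1.variable_scope('demo'):
        v = tf.compat.v1.get_variable('v_demo', [1, 128], initializer=glorot)

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        print(sess.run(v).shape)    # (1, 128)
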
@@ -40,7 +40,7 @@ def __call__(self, query, ref, env):
# get the current demand and load values from environment
demand = env.demand
load = env.load
max_time = tf.shape(demand)[1]
max_time = tf.shape(input=demand)[1]

# embed demand and project it
# emb_d:[batch_size x max_time x dim ]
@@ -61,7 +61,7 @@ def __call__(self, query, ref, env):
expanded_q = tf.tile(tf.expand_dims(q,1),[1,max_time,1])

# v_view:[batch_size x dim x 1]
v_view = tf.tile( self.v, [tf.shape(e)[0],1,1])
v_view = tf.tile( self.v, [tf.shape(input=e)[0],1,1])

# u : [batch_size x max_time x dim] * [batch_size x dim x 1] =
# [batch_size x max_time]
@@ -82,17 +82,17 @@ def __init__(self, dim, use_tanh=False, C=10,_name='Attention',_scope=''):
self.use_tanh = use_tanh
self._scope = _scope

with tf.variable_scope(_scope+_name):
with tf.compat.v1.variable_scope(_scope+_name):
# self.v: is a variable with shape [1 x dim]
self.v = tf.get_variable('v',[1,dim],
initializer=tf.contrib.layers.xavier_initializer())
self.v = tf.compat.v1.get_variable('v',[1,dim],
initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"))
self.v = tf.expand_dims(self.v,2)

self.emb_d = tf.layers.Conv1D(dim,1,_scope=_scope+_name +'/emb_d') #conv1d
self.project_d = tf.layers.Conv1D(dim,1,_scope=_scope+_name +'/proj_d') #conv1d_1
self.emb_d = tf.compat.v1.layers.Conv1D(dim,1,_scope=_scope+_name +'/emb_d') #conv1d
self.project_d = tf.compat.v1.layers.Conv1D(dim,1,_scope=_scope+_name +'/proj_d') #conv1d_1

self.project_query = tf.layers.Dense(dim,_scope=_scope+_name +'/proj_q') #
self.project_ref = tf.layers.Conv1D(dim,1,_scope=_scope+_name +'/proj_e') #conv1d_2
self.project_query = tf.compat.v1.layers.Dense(dim,_scope=_scope+_name +'/proj_q') #
self.project_ref = tf.compat.v1.layers.Conv1D(dim,1,_scope=_scope+_name +'/proj_e') #conv1d_2

self.C = C # tanh exploration parameter
self.tanh = tf.nn.tanh
@@ -119,7 +119,7 @@ def __call__(self, query, ref, env):
"""
# we need the first demand value for the critic
demand = env.input_data[:,:,-1]
max_time = tf.shape(demand)[1]
max_time = tf.shape(input=demand)[1]

# embed demand and project it
# emb_d:[batch_size x max_time x dim ]
@@ -134,7 +134,7 @@ def __call__(self, query, ref, env):
expanded_q = tf.tile(tf.expand_dims(q,1),[1,max_time,1])

# v_view:[batch_size x dim x 1]
v_view = tf.tile( self.v, [tf.shape(e)[0],1,1])
v_view = tf.tile( self.v, [tf.shape(input=e)[0],1,1])

# u : [batch_size x max_time x dim] * [batch_size x dim x 1] =
# [batch_size x max_time]
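
For reference, the tf.shape(input=...) edits in this file are just the keyword form of the same op. A shape-only eager sketch of the scoring step the surrounding comments describe: v is tiled across the dynamic batch dimension taken from tf.shape(input=e)[0], then a batched matmul collapses the feature dimension. Here e is a random stand-in for the projected reference; the real code sums the projected query and demand terms with e before the tanh.

    import tensorflow as tf

    batch, max_time, dim = 4, 7, 128
    e = tf.random.normal([batch, max_time, dim])     # stand-in for the projected ref
    v = tf.random.normal([1, dim, 1])                # shared attention vector

    # tile v across the dynamic batch dimension, as the code above does
    v_view = tf.tile(v, [tf.shape(input=e)[0], 1, 1])            # [batch, dim, 1]

    # [batch, max_time, dim] @ [batch, dim, 1] -> [batch, max_time, 1]
    u = tf.squeeze(tf.matmul(tf.nn.tanh(e), v_view), 2)          # [batch, max_time]
    print(u.shape)    # (4, 7)
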
12 changes: 6 additions & 6 deletions VRP/vrp_utils.py
@@ -144,12 +144,12 @@ def __init__(self,
self.n_nodes = args['n_nodes']
self.n_cust = args['n_cust']
self.input_dim = args['input_dim']
self.input_data = tf.placeholder(tf.float32,\
self.input_data = tf.compat.v1.placeholder(tf.float32,\
shape=[None,self.n_nodes,self.input_dim])

self.input_pnt = self.input_data[:,:,:2]
self.demand = self.input_data[:,:,-1]
self.batch_size = tf.shape(self.input_pnt)[0]
self.batch_size = tf.shape(input=self.input_pnt)[0]

def reset(self,beam_width=1):
'''
@@ -220,7 +220,7 @@ def step(self,
d_sat = tf.minimum(tf.gather_nd(self.demand,batched_idx), self.load)

# update the demand
d_scatter = tf.scatter_nd(batched_idx, d_sat, tf.cast(tf.shape(self.demand),tf.int64))
d_scatter = tf.scatter_nd(batched_idx, d_sat, tf.cast(tf.shape(input=self.demand),tf.int64))
self.demand = tf.subtract(self.demand, d_scatter)
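
Side note: only the tf.shape call changes here, but the gather_nd/scatter_nd demand update is easy to sanity-check in eager mode. A toy walk-through (numbers illustrative): row b of batched_idx is [b, chosen node], the satisfied demand d_sat is scattered back onto the demand matrix and subtracted.

    import tensorflow as tf

    demand = tf.constant([[2.0, 5.0, 1.0],
                          [4.0, 0.0, 3.0]])              # [batch=2, n_nodes=3]
    load   = tf.constant([3.0, 10.0])                    # remaining vehicle load
    idx    = tf.constant([[1], [2]], dtype=tf.int64)     # chosen node per instance

    batch_range = tf.expand_dims(tf.range(2, dtype=tf.int64), 1)
    batched_idx = tf.concat([batch_range, idx], 1)       # [[0, 1], [1, 2]]

    d_sat = tf.minimum(tf.gather_nd(demand, batched_idx), load)        # [3., 3.]
    d_scatter = tf.scatter_nd(batched_idx, d_sat,
                              tf.cast(tf.shape(input=demand), tf.int64))
    demand = tf.subtract(demand, d_scatter)
    print(demand.numpy())    # [[2. 2. 1.], [4. 0. 0.]]
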

# update load
@@ -239,7 +239,7 @@ def step(self,

self.mask += tf.concat( [tf.tile(tf.expand_dims(tf.cast(tf.equal(self.load,0),
tf.float32),1), [1,self.n_cust]),
tf.expand_dims(tf.multiply(tf.cast(tf.greater(tf.reduce_sum(self.demand,1),0),tf.float32),
tf.expand_dims(tf.multiply(tf.cast(tf.greater(tf.reduce_sum(input_tensor=self.demand,axis=1),0),tf.float32),
tf.squeeze( tf.cast(tf.equal(idx,self.n_cust),tf.float32))),1)],1)

state = State(load=self.load,
@@ -283,7 +283,7 @@ def reward_func(sample_solution):
# get the reward based on the route lengths


route_lens_decoded = tf.reduce_sum(tf.pow(tf.reduce_sum(tf.pow(\
(sample_solution_tilted - sample_solution) ,2), 2) , .5), 0)
route_lens_decoded = tf.reduce_sum(input_tensor=tf.pow(tf.reduce_sum(input_tensor=tf.pow(\
(sample_solution_tilted - sample_solution) ,2), axis=2) , .5), axis=0)
return route_lens_decoded
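
For context, the compat placeholder declared in __init__ expects a [batch, n_nodes, input_dim] tensor whose first two columns are coordinates and whose last column is demand, which is exactly what the slices above assume. A tiny eager sketch of that layout with toy values:

    import tensorflow as tf

    # [batch=1, n_nodes=3, 3]: columns 0-1 are (x, y), last column is demand
    input_data = tf.constant([[[0.1, 0.2, 0.0],
                               [0.5, 0.9, 4.0],
                               [0.3, 0.4, 2.0]]])

    input_pnt  = input_data[:, :, :2]                 # coordinates, [1, 3, 2]
    demand     = input_data[:, :, -1]                 # demands,     [1, 3]
    batch_size = tf.shape(input=input_pnt)[0]
    print(input_pnt.shape, demand.shape, int(batch_size))    # (1, 3, 2) (1, 3) 1
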

10 changes: 6 additions & 4 deletions main.py
@@ -10,6 +10,8 @@
from shared.decode_step import RNNDecodeStep
from model.attention_agent import RLAgent

tf.compat.v1.disable_eager_execution()

def load_task_specific_components(task):
'''
This function load task-specific libraries
@@ -36,9 +38,9 @@ def load_task_specific_components(task):
return DataGenerator, Env, reward_func, AttentionActor, AttentionCritic

def main(args, prt):
config = tf.ConfigProto()
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess = tf.compat.v1.Session(config=config)

# load task specific classes
DataGenerator, Env, reward_func, AttentionActor, AttentionCritic = \
@@ -97,7 +99,7 @@ def main(args, prt):
if random_seed is not None and random_seed > 0:
prt.print_out("# Set random seed to %d" % random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
tf.reset_default_graph()
tf.compat.v1.set_random_seed(random_seed)
tf.compat.v1.reset_default_graph()

main(args, prt)
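
Reviewer note: a condensed sketch of the compat-mode setup this PR puts in main.py under TF 2.x: eager execution disabled so the placeholders and Session keep working, GPU memory growth enabled, and both the NumPy and graph-level TF seeds set (the seed value below is illustrative). One small observation: tf.compat.v1.set_random_seed seeds the current default graph, so it may be worth calling tf.compat.v1.reset_default_graph() before it (as sketched here) rather than after, otherwise the fresh graph starts without the seed.

    import numpy as np
    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()   # placeholders/Sessions need graph mode
    tf.compat.v1.reset_default_graph()

    seed = 123                                # illustrative
    np.random.seed(seed)
    tf.compat.v1.set_random_seed(seed)        # graph-level seed for the new graph

    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.compat.v1.Session(config=config)

    x = tf.compat.v1.placeholder(tf.float32, shape=[None, 2])
    out = tf.reduce_sum(input_tensor=x, axis=1)
    print(sess.run(out, feed_dict={x: np.ones((3, 2), dtype=np.float32)}))   # [2. 2. 2.]
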
34 changes: 17 additions & 17 deletions misc_utils.py
@@ -91,11 +91,11 @@ class Logger(object):

def __init__(self, log_dir):
"""Create a summary writer logging to log_dir."""
self.writer = tf.summary.FileWriter(log_dir)
self.writer = tf.compat.v1.summary.FileWriter(log_dir)

def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
summary = tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
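
Quick sketch of the migrated summary path Logger relies on: build a tf.compat.v1.Summary proto and write it through the compat FileWriter. The './logs_demo' directory and the 'reward' tag are illustrative, and the writer needs eager execution disabled.

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()    # FileWriter requires graph mode

    writer = tf.compat.v1.summary.FileWriter('./logs_demo')
    for step in range(3):
        summary = tf.compat.v1.Summary(
            value=[tf.compat.v1.Summary.Value(tag='reward', simple_value=float(step))])
        writer.add_summary(summary, step)
    writer.flush()
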

def image_summary(self, tag, images, step):
@@ -111,14 +111,14 @@ def image_summary(self, tag, images, step):
scipy.misc.toimage(img).save(s, format="png")

# Create an Image object
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
img_sum = tf.compat.v1.Summary.Image(encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1])
# Create a Summary value
img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
img_summaries.append(tf.compat.v1.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))

# Create and write Summary
summary = tf.Summary(value=img_summaries)
summary = tf.compat.v1.Summary(value=img_summaries)
self.writer.add_summary(summary, step)

def histo_summary(self, tag, values, step, bins=1000):
Expand All @@ -128,7 +128,7 @@ def histo_summary(self, tag, values, step, bins=1000):
counts, bin_edges = np.histogram(values, bins=bins)

# Fill the fields of the histogram proto
hist = tf.HistogramProto()
hist = tf.compat.v1.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.bucket.append(c)

# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
summary = tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush()

Expand All @@ -158,12 +158,12 @@ def _single_cell(unit_type, num_units, forget_bias, dropout, prt,
# Cell Type
if unit_type == "lstm":
prt.print_out(" LSTM, forget_bias=%g" % forget_bias, new_line=False)
single_cell = tf.contrib.rnn.BasicLSTMCell(
single_cell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(
num_units,
forget_bias=forget_bias)
elif unit_type == "gru":
prt.print_out(" GRU", new_line=False)
single_cell = tf.contrib.rnn.GRUCell(num_units)
single_cell = tf.compat.v1.nn.rnn_cell.GRUCell(num_units)
else:
raise ValueError("Unknown unit type %s!" % unit_type)

@@ -250,15 +250,15 @@ def create_rnn_cell(unit_type, num_units, num_layers, num_residual_layers,
if len(cell_list) == 1: # Single layer.
return cell_list[0]
else: # Multi layers
return tf.contrib.rnn.MultiRNNCell(cell_list)
return tf.compat.v1.nn.rnn_cell.MultiRNNCell(cell_list)
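
Note: the tf.contrib.rnn cells map one-to-one onto tf.compat.v1.nn.rnn_cell, as used above, since tf.contrib is gone in 2.x. A small graph-mode sketch stacking two of the migrated GRU cells (sizes illustrative):

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()

    cell_list = [tf.compat.v1.nn.rnn_cell.GRUCell(64) for _ in range(2)]
    stacked   = tf.compat.v1.nn.rnn_cell.MultiRNNCell(cell_list)

    inputs = tf.compat.v1.placeholder(tf.float32, [None, 10, 8])   # [batch, time, feat]
    outputs, state = tf.compat.v1.nn.dynamic_rnn(stacked, inputs, dtype=tf.float32)
    print(outputs.shape)    # (None, 10, 64)
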

def gradient_clip(gradients, params, max_gradient_norm):
"""Clipping gradients of a model."""
clipped_gradients, gradient_norm = tf.clip_by_global_norm(
gradients, max_gradient_norm)
gradient_norm_summary = [tf.summary.scalar("grad_norm", gradient_norm)]
gradient_norm_summary = [tf.compat.v1.summary.scalar("grad_norm", gradient_norm)]
gradient_norm_summary.append(
tf.summary.scalar("clipped_gradient", tf.global_norm(clipped_gradients)))
tf.compat.v1.summary.scalar("clipped_gradient", tf.linalg.global_norm(clipped_gradients)))

return clipped_gradients, gradient_norm_summary
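
For reference, tf.linalg.global_norm is the TF 2.x home of the old tf.global_norm; tf.clip_by_global_norm itself is unchanged. A toy eager check of the clipping logic above:

    import tensorflow as tf

    gradients = [tf.constant([3.0, 4.0]), tf.constant([12.0])]   # global norm = 13
    clipped, norm = tf.clip_by_global_norm(gradients, clip_norm=5.0)

    print(norm.numpy())                               # 13.0
    print(tf.linalg.global_norm(clipped).numpy())     # ~5.0, rescaled to the clip norm
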

@@ -274,7 +274,7 @@ def create_or_load_model(model, model_dir, session, out_dir, name):
else:
utils.print_out(" created %s model with fresh parameters, time %.2fs." %
(name, time.time() - start_time))
session.run(tf.global_variables_initializer())
session.run(tf.compat.v1.global_variables_initializer())

global_step = model.global_step.eval(session=session)
return model, global_step
@@ -290,14 +290,14 @@ def add_summary(summary_writer, global_step, tag, value):
"""Add a new summary to the current summary_writer.
Useful to log things that are not part of the training graph, e.g., tag=BLEU.
"""
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
summary = tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag=tag, simple_value=value)])
summary_writer.add_summary(summary, global_step)


def get_config_proto(log_device_placement=False, allow_soft_placement=True):
# GPU options:
# https://www.tensorflow.org/versions/r0.10/how_tos/using_gpu/index.html
config_proto = tf.ConfigProto(
config_proto = tf.compat.v1.ConfigProto(
log_device_placement=log_device_placement,
allow_soft_placement=allow_soft_placement)
config_proto.gpu_options.allow_growth = True
@@ -311,7 +311,7 @@ def debug_tensor(s, msg=None, summarize=10):
"""Print the shape and value of a tensor at test time. Return a new tensor."""
if not msg:
msg = s.name
return tf.Print(s, [tf.shape(s), s], msg + " ", summarize=summarize)
return tf.compat.v1.Print(s, [tf.shape(input=s), s], msg + " ", summarize=summarize)

def tf_print(tensor, transform=None):

@@ -321,7 +321,7 @@ def print_tensor(x):
# but adding a transformation of some kind usually makes the output more digestible
print(x if transform is None else transform(x))
return x
log_op = tf.py_func(print_tensor, [tensor], [tensor.dtype])[0]
log_op = tf.compat.v1.py_func(print_tensor, [tensor], [tensor.dtype])[0]
with tf.control_dependencies([log_op]):
res = tf.identity(tensor)
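
Final note: the py_func/Print debugging helpers only needed their compat spellings. A self-contained sketch of the tf_print pattern shown above, run under a compat session; the closing return is assumed from the elided tail of the function, and the lambda transform is illustrative.

    import tensorflow as tf

    tf.compat.v1.disable_eager_execution()

    def tf_print(tensor, transform=None):
        def print_tensor(x):
            # runs as ordinary Python when the graph is executed
            print(x if transform is None else transform(x))
            return x
        log_op = tf.compat.v1.py_func(print_tensor, [tensor], [tensor.dtype])[0]
        with tf.control_dependencies([log_op]):
            return tf.identity(tensor)       # assumed tail of the original helper

    x = tf.constant([1.0, 2.0, 3.0])
    y = tf_print(x, transform=lambda v: v.sum())     # prints 6.0 when y is evaluated

    with tf.compat.v1.Session() as sess:
        sess.run(y)
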
