Commit
fixed a silent bug, cleaned-up a bit.
mylonasc committed Jan 12, 2021
1 parent 16365c4 commit e2da3c5
Showing 6 changed files with 18 additions and 1,016 deletions.
1 change: 0 additions & 1 deletion README.md
@@ -1,6 +1,5 @@
# Tensorflow message passing networks library
A library for easy construction of message-passing networks in tensorflow keras.
Under construction.

# Usage
## Basic data-structures
775 changes: 5 additions & 770 deletions ibk_gnns/graphnet_utils.py

Large diffs are not rendered by default.

31 changes: 12 additions & 19 deletions ibk_gnns/minigraphnets.py
@@ -4,16 +4,19 @@

def _copy_any_ds(val):
"""
Tensorflow copies (by value) resource variables so there is no copy method for them.
Because the datastructures I want need to have clear copy semantics this is a helper
function to make sure that when I want to copy I actually copy.
Copy semantics for different datatypes accepted.
This affects what happens when copying nodes, edges and graphs.
In order to trace gradients,
and defines a consistent interface regardless of the input data-structure.
"""
    valout = val
    if isinstance(val, np.ndarray) or isinstance(val, list):
        valout = val.copy()

    if isinstance(val, tf.Variable) or isinstance(val, tf.Tensor):
        # Plain assignment would alias an eager tensor rather than copy it.
        valout = tf.identity(val)  # TODO: maybe have a flag to override this? Adding more ops does not always make sense.

    return valout
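This tf.identity call is presumably the silent-bug fix the commit message refers to: plain assignment aliases an eager tensor or variable rather than copying it. A minimal standalone sketch of the difference:

import tensorflow as tf

v = tf.Variable([1.0, 2.0])
alias = v                  # plain assignment: same underlying variable
snapshot = tf.identity(v)  # materializes an independent tensor holding the current value
v.assign([9.0, 9.0])
print(alias.numpy())       # [9. 9.] -- the alias follows the mutation
print(snapshot.numpy())    # [1. 2.] -- the snapshot is unaffected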

class Node:
    def __init__(self, node_attr_tensor):
@@ -32,20 +35,13 @@ def set_tensor(self, tensor):

    def copy(self):
        return Node(_copy_any_ds(self.node_attr_tensor))
        # if isinstance(self.node_attr_tensor, np.ndarray):
        #     node_attr_tensor = self.node_attr_tensor.copy()
        # else:
        #     node_attr_tensor = self.node_attr_tensor  # if this is an eager tensor, the assignment copies the tensor.
        # return Node(node_attr_tensor)

    def __add__(self, n):
        return Node(self.node_attr_tensor + n.node_attr_tensor)

    def __sub__(self, n):
        return Node(self.node_attr_tensor - n.node_attr_tensor)
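Node wraps a single attribute tensor and overloads elementwise arithmetic; a small usage sketch (the constants are illustrative):

import tensorflow as tf

a = Node(tf.constant([[1.0, 2.0]]))
b = Node(tf.constant([[0.5, 0.5]]))
c = a + b                  # a new Node wrapping the elementwise sum
print(c.node_attr_tensor)  # tf.Tensor([[1.5 2.5]], shape=(1, 2), dtype=float32)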

# This implementation relies on eager mode, and all computation happens in place.
# Only nodes and edges hold data; the Graph class just defines the computation between them.
class Edge:
    def __init__(self, edge_attr_tensor, node_from, node_to):
        self.edge_tensor = edge_attr_tensor
@@ -127,11 +123,7 @@ def copy(self):
        # Instantiate the new edges:
        coppied_edge_instances = []
        for e in self.edges:
            # if isinstance(e.edge_tensor, np.ndarray):
            #     edge_val = e.edge_tensor.copy()
            # else:
            #     edge_val = e.edge_tensor
            enew = e.copy(nodes_correspondence)
            coppied_edge_instances.append(enew)
        return Graph(nodes_coppied, coppied_edge_instances)
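A usage sketch of the copy chain above, assuming the Node, Edge and Graph constructors shown in this file:

n1, n2 = Node(tf.constant([[1.0]])), Node(tf.constant([[2.0]]))
e12 = Edge(tf.constant([[0.1]]), n1, n2)
g = Graph([n1, n2], [e12])
g2 = g.copy()  # nodes are copied first; edges are rewired to the copied nodes via nodes_correspondence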

@@ -224,8 +216,6 @@ def make_graph_tuple_from_graph_list(list_of_graphs):
        senders.append(all_nodes.index(e.node_from))
        receivers.append(all_nodes.index(e.node_to))
        # senders.append(e.node_from.find(gin.nodes))
        # receivers.append(e.node_to.find(gin.nodes))

    for n in all_nodes:
        nodes_attr_tensor.append(n.node_attr_tensor)
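The senders/receivers lists encode each edge's endpoints as integer indices into the concatenated node list. A standalone sketch of how such indices are typically consumed downstream (the names here are illustrative, not from this file):

import tensorflow as tf

nodes = tf.constant([[1.0], [2.0], [3.0]])   # stacked node attributes
senders, receivers = [0, 2], [1, 1]          # edges n0 -> n1 and n2 -> n1
messages = tf.gather(nodes, senders)         # per-edge source features
aggregated = tf.math.unsorted_segment_sum(messages, tf.constant(receivers), num_segments=3)
print(aggregated.numpy())                    # node 1 accumulates 1.0 + 3.0 = 4.0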
@@ -289,6 +279,9 @@ def get_graph(self, graph_index):
"""
Returns a new graph with the same properties as the original graph.
gradients are not traced through this operation.
It's better if this method is avoided since it's inneficient.
TODO: include the implementation of algs for slicing graphs from graph tuples etc.
"""
assert(graph_index >=0 )
if graph_index > self.n_graphs:
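Usage sketch, reusing the toy graphs g and g2 from the earlier sketch (illustrative):

gt = make_graph_tuple_from_graph_list([g, g2])
g_first = gt.get_graph(0)  # reconstructs the first graph; gradients do not flow through this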
224 changes: 0 additions & 224 deletions ibk_gnns/test.py

This file was deleted.

2 changes: 1 addition & 1 deletion ibk_gnns/utils_train.py
@@ -1,6 +1,6 @@
import numpy as np
"""
Some utilities (mostly mirroring keras facilities).
These were preferred to the keras analogues for greater transparency into what is going on, and for full control over conditions/events etc.
"""
class LossLogger:
1 change: 0 additions & 1 deletion test.py
Expand Up @@ -187,7 +187,6 @@ def test_graph_tuple_eval(self):
        gn.graph_tuple_eval(gt_copy)
        graphs_evaluated_separately = [gn.graph_eval(g_) for g_ in old_graphs_list]
        graphs_evaluated_from_graph_tuple = [gt_copy.get_graph(i) for i in range(gt_copy.n_graphs)]
        # import tensorflow as tf
        flatten_nodes = lambda x: tf.stack([x_.get_state() for x_ in x.nodes])
        flatten_edges = lambda x: tf.stack([x_.edge_tensor for x_ in x.edges])
        for g1, g2 in zip(graphs_evaluated_from_graph_tuple, graphs_evaluated_separately):
