Fixes to Convolutions #1286 (#1294)

Status: Open. Wants to merge 31 commits into master.

Changes shown are from 2 of the 31 commits.

Commits (31):
8ce3372  Added makefile for IF_curr_delta neurons to be used in convolutions (emijan-kth, Jan 12, 2023)
2e9c65f  Merge branch 'SpiNNakerManchester:master' into master (emijan-kth, Jan 24, 2023)
7accc3f  Added makefile for IF_curr_delta neurons to be used in convolutions (emijan-kth, Jan 12, 2023)
4324daf  Merge branch 'SpiNNakerManchester:master' into master (emijan-kth, Jan 24, 2023)
1d3e0fe  Merge branch 'SpiNNakerManchester:master' into master (emijan-kth, Feb 2, 2023)
6f70b30  Multiple fixes to convolutions. (#2) (emijan-kth, Feb 2, 2023)
047274b  Merge branch 'SpiNNakerManchester:master' into master (emijan-kth, Feb 6, 2023)
00388de  Merge branch 'local_only_delays' (emijan-kth, Feb 10, 2023)
bb90642  Fixed merge error: local_only_delays should have increased size of co… (emijan-kth, Feb 10, 2023)
91eeed6  Added option to ConvolutionConnector for delays varying horizontally … (emijan-kth, Feb 10, 2023)
4003c95  shapes and delays (emijan-kth, Feb 15, 2023)
584ed95  Set kernel_shape overrides shape of kernel_weights (emijan-kth, Feb 16, 2023)
13cf51e  Multisynaptic connections with varying delays. (emijan-kth, Feb 16, 2023)
27137e2  Merge pull request #1286 from emijan-kth/master (Christian-B, Feb 20, 2023)
5a7faa9  Merge branch 'master' into emijan-kth (Christian-B, Feb 20, 2023)
bdbade4  Merge branch 'master' into emijan-kth (Christian-B, Feb 24, 2023)
56d3974  Merge branch 'SpiNNakerManchester:local_only_delays' into local_only_… (emijan-kth, Feb 26, 2023)
eac4934  Merge branch 'local_only_delays' (emijan-kth, Feb 26, 2023)
986aa80  Revert "Added option to ConvolutionConnector for delays varying horiz… (emijan-kth, Feb 27, 2023)
cce37d4  Merge branch 'wip-strides-delays' (emijan-kth, Feb 27, 2023)
7d2a5bf  Fixed merge error: should have increased size of connector struct. (emijan-kth, Feb 27, 2023)
f882202  Work-in-progress: Merge remote-tracking branch 'upstream/master' (emijan-kth, Mar 3, 2023)
af90e25  Completed merge. (emijan-kth, Mar 4, 2023)
466aead  Merge branch 'master' into emijan-kth (rowleya, Mar 14, 2023)
b6a447e  Fix counting (rowleya, Mar 14, 2023)
1699340  Merge remote-tracking branch 'emijan-kth/master' into emijan-kth (rowleya, Mar 14, 2023)
8022750  Line too long (rowleya, Mar 14, 2023)
6ff18c8  Flake8 (rowleya, Mar 14, 2023)
0b216a2  Rats (rowleya, Mar 14, 2023)
4e312c4  Merge remote-tracking branch 'upstream/master' (emijan-kth, Mar 14, 2023)
83f43f5  Merge remote-tracking branch 'emijan-kth/master' into emijan-kth (rowleya, Mar 14, 2023)
neural_modelling/src/neuron/local_only/local_only_conv_impl.c (126 changes: 48 additions & 78 deletions)

@@ -64,14 +64,7 @@ typedef struct {
     lc_coord_t recip_pool_strides;
     uint16_t positive_synapse_type;
     uint16_t negative_synapse_type;
-    union {
-        uint32_t delay;
-        struct {
-            uint16_t multisynaptic_delay_max;
-            uint16_t multisynaptic_delay_step;
-        };
-    };
-    uint32_t num_multisynaptic_connections;
+    uint32_t delay;
     uint32_t kernel_index;
 } connector;

@@ -182,96 +175,73 @@ static inline lc_coord_t map_pre_to_post(connector *connector, lc_coord_t pre,
 //! coordinates will be affected (i.e. which of them are 'reached' by
 //! the kernel).
 static inline void do_convolution_operation(
-        uint32_t time, lc_coord_t pre_coord, connector *conn,
+        uint32_t time, lc_coord_t pre_coord, connector *connector,
         uint16_t *ring_buffers) {
     lc_coord_t start_i;
-    log_debug("kernel height: %d, kernel width: %d, padding height: %d, "
-            "padding width: %d, strides row: %d, strides col: %d",
-            conn->kernel.height, conn->kernel.width, conn->padding.height,
-            conn->padding.width, conn->strides.row, conn->strides.col);
-    lc_coord_t post_coord = map_pre_to_post(conn, pre_coord, &start_i);
+    log_debug("kernel height: %d, kernel width: %d, "
+            "padding height: %d, padding width: %d, "
+            "strides row: %d, strides col: %d",
+            connector->kernel.height, connector->kernel.width,
+            connector->padding.height, connector->padding.width,
+            connector->strides.row, connector->strides.col);
+    lc_coord_t post_coord = map_pre_to_post(connector, pre_coord, &start_i);
     log_debug("pre row %d, col %d AS post row %d, col %d",
             pre_coord.row, pre_coord.col, post_coord.row, post_coord.col);
-    lc_weight_t *connector_weights = &weights[conn->kernel_index];
+    lc_weight_t *connector_weights = &weights[connector->kernel_index];

-    int32_t kw = conn->kernel.width;
+    int32_t kw = connector->kernel.width;
     for (int32_t i_row = start_i.row, tmp_row = post_coord.row;
-            i_row < conn->kernel.height; i_row += conn->strides.row, --tmp_row) {
-        int32_t kr = conn->kernel.height - 1 - i_row;
+            i_row < connector->kernel.height; i_row += connector->strides.row, --tmp_row) {
+        int32_t kr = connector->kernel.height - 1 - i_row;
         log_debug("i_row = %u, kr = %u, tmp_row = %u", i_row, kr, tmp_row);

         if ((tmp_row < config->post_start.row) || (tmp_row > config->post_end.row)) {
             log_debug("tmp_row outside");
             continue;
         }

         for (int32_t i_col = start_i.col, tmp_col = post_coord.col;
-                i_col < conn->kernel.width; i_col += conn->strides.col, --tmp_col) {
-            int32_t kc = conn->kernel.width - 1 - i_col;
-
-            uint32_t delay;
-            if (conn->num_multisynaptic_connections == 1) {
-                delay = conn->delay;
-            } else {
-                kc = conn->kernel.width - 1 - conn->num_multisynaptic_connections * i_col;
-                if (kc < 0) {
-                    log_debug("Multisynaptic connection: i_col = %u, kc = %u, tmp_col = %u",
-                            i_col, kc, tmp_col);
-                    break;
-                }
-                delay = conn->multisynaptic_delay_max;
-                log_debug("Multisynaptic connection: start_i.col = %u, delay = %u",
-                        start_i.col, delay);
-            }
-
+                i_col < connector->kernel.width; i_col += connector->strides.col, --tmp_col) {
+            int32_t kc = connector->kernel.width - 1 - i_col;
+            log_debug("i_col = %u, kc = %u, tmp_col = %u", i_col, kc, tmp_col);

             if ((tmp_col < config->post_start.col) || (tmp_col > config->post_end.col)) {
                 log_debug("tmp_col outside");
                 continue;
             }

-            for (uint32_t multisynapse_index = 0;
-                    multisynapse_index < conn->num_multisynaptic_connections;
-                    ++multisynapse_index, delay -= conn->multisynaptic_delay_step, --kc) {
-                log_debug("kc = %u, delay = %u", kc, delay);
-
-                // This the neuron id relative to the neurons on this core
-                uint32_t post_index =
-                        ((tmp_row - config->post_start.row) * config->post_shape.width)
-                        + (tmp_col - config->post_start.col);
-                uint32_t k = (kr * kw) + kc;
-                log_debug("weight index = %u", k);
-                lc_weight_t weight = connector_weights[k];
-                if (weight == 0) {
-                    log_debug("zero weight");
-                    continue;
-                }
-                uint32_t rb_index = 0;
-                if (weight > 0) {
-                    rb_index = synapse_row_get_ring_buffer_index(time + delay,
-                            conn->positive_synapse_type, post_index,
-                            synapse_type_index_bits, synapse_index_bits,
-                            synapse_delay_mask);
-                } else {
-                    rb_index = synapse_row_get_ring_buffer_index(time + delay,
-                            conn->negative_synapse_type, post_index,
-                            synapse_type_index_bits, synapse_index_bits,
-                            synapse_delay_mask);
-                    weight = -weight;
-                }
-
-                log_debug("Updating ring_buffers[%u] for post neuron %u = %u, %u, with weight %u",
-                        rb_index, post_index, tmp_col, tmp_row, weight);
-
-                // Add weight to current ring buffer value, avoiding saturation
-                uint32_t accumulation = ring_buffers[rb_index] + weight;
-                uint32_t sat_test = accumulation & 0x10000;
-                if (sat_test) {
-                    accumulation = sat_test - 1;
-                }
-                ring_buffers[rb_index] = accumulation;
-            }
+            // This the neuron id relative to the neurons on this core
+            uint32_t post_index =
+                    ((tmp_row - config->post_start.row) * config->post_shape.width)
+                    + (tmp_col - config->post_start.col);
+            uint32_t k = (kr * kw) + kc;
+            log_debug("weight index = %u", k);
+            lc_weight_t weight = connector_weights[k];
+            if (weight == 0) {
+                log_debug("zero weight");
+                continue;
+            }
+            uint32_t rb_index = 0;
+            if (weight > 0) {
+                rb_index = synapse_row_get_ring_buffer_index(time + connector->delay,
+                        connector->positive_synapse_type, post_index,
+                        synapse_type_index_bits, synapse_index_bits,
+                        synapse_delay_mask);
+            } else {
+                rb_index = synapse_row_get_ring_buffer_index(time + connector->delay,
+                        connector->negative_synapse_type, post_index,
+                        synapse_type_index_bits, synapse_index_bits,
+                        synapse_delay_mask);
+                weight = -weight;
+            }
+            log_debug("Updating ring_buffers[%u] for post neuron %u = %u, %u, with weight %u",
+                    rb_index, post_index, tmp_col, tmp_row, weight);
+
+            // Add weight to current ring buffer value, avoiding saturation
+            uint32_t accumulation = ring_buffers[rb_index] + weight;
+            uint32_t sat_test = accumulation & 0x10000;
+            if (sat_test) {
+                accumulation = sat_test - 1;
+            }
+            ring_buffers[rb_index] = accumulation;
         }
     }
 }
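The ring-buffer update at the end of do_convolution_operation, present in both the old and new code above, saturates rather than wraps: ring-buffer entries are 16 bits wide, so a carry into bit 16 clamps the stored value at 0xFFFF instead of letting it wrap back to a small number. A minimal Python sketch of that arithmetic, for reference only (the function name is illustrative, not part of the codebase):

    def saturating_accumulate(ring_buffer_value, weight):
        # Mirrors the C logic above: add in 32-bit arithmetic, then clamp
        # at 0xFFFF if the sum carried into bit 16.
        accumulation = ring_buffer_value + weight
        sat_test = accumulation & 0x10000
        if sat_test:
            accumulation = sat_test - 1  # 0x10000 - 1 == 0xFFFF
        return accumulation

    assert saturating_accumulate(0xFFF0, 0x0020) == 0xFFFF  # clamps on overflow
    assert saturating_accumulate(0x0010, 0x0020) == 0x0030  # normal addition

Clamping keeps a burst of coincident spikes from overflowing the accumulated weight back to a misleadingly small value.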
(second changed file: Python source defining ConvolutionConnector; filename not captured in the diff view)

@@ -34,7 +34,7 @@
 #: The number of 16-bit shorts in the connector struct,
 #: ignoring the source_key_info struct but including the delay and the
 #: 32-bit weight index
-CONNECTOR_CONFIG_SHORTS = 28
+CONNECTOR_CONFIG_SHORTS = 18


 class ConvolutionConnector(AbstractConnector):
@@ -46,24 +46,18 @@ class ConvolutionConnector(AbstractConnector):

     __slots__ = [
         "__kernel_weights",
-        "__kernel_shape",
         "__strides",
         "__padding_shape",
         "__pool_shape",
         "__pool_stride",
         "__positive_receptor_type",
-        "__negative_receptor_type",
-        "__num_multisynaptic_connections",
-        "__multisynaptic_delay_min"
+        "__negative_receptor_type"
     ]

     def __init__(self, kernel_weights, kernel_shape=None, strides=None,
                  padding=None, pool_shape=None, pool_stride=None,
                  positive_receptor_type="excitatory",
-                 negative_receptor_type="inhibitory",
-                 num_multisynaptic_connections=1,
-                 multisynaptic_delay_min=0,
-                 safe=True,
+                 negative_receptor_type="inhibitory", safe=True,
                  verbose=False, callback=None):
         """
         :param kernel_weights:
@@ -141,11 +135,6 @@ def __init__(self, kernel_weights, kernel_shape=None, strides=None,
         self.__positive_receptor_type = positive_receptor_type
         self.__negative_receptor_type = negative_receptor_type

-        self.__num_multisynaptic_connections = num_multisynaptic_connections
-        if num_multisynaptic_connections != 1:
-            self.__kernel_shape = self.__get_kernel_shape(kernel_shape)
-        self.__multisynaptic_delay_min = multisynaptic_delay_min
-
     @property
     def positive_receptor_type(self):
         return self.__positive_receptor_type
@@ -194,8 +183,6 @@ def __decode_kernel(self, w, shape):
                 f"Unknown combination of kernel_weights ({w}) and"
                 f" kernel_shape ({shape})")

-        self.__kernel_shape = self.__kernel_weights.shape
-
     @staticmethod
     def __to_2d_shape(shape, param_name):
         if shape is None:
@@ -228,7 +215,7 @@ def get_post_shape(self, shape):
             post_pool_shape = shape - (self.__pool_shape - 1)
             shape = (post_pool_shape // self.__pool_stride) + 1

-        kernel_shape = numpy.array(self.__kernel_shape)
+        kernel_shape = numpy.array(self.__kernel_weights.shape)
         post_shape = shape - kernel_shape + (2 * self.__padding_shape)

         return numpy.clip(
@@ -285,7 +272,7 @@ def get_n_connections_from_pre_vertex_maximum(

     @overrides(AbstractConnector.get_n_connections_to_post_vertex_maximum)
     def get_n_connections_to_post_vertex_maximum(self, synapse_info):
-        w, h = self.__kernel_shape
+        w, h = self.__kernel_weights.shape
         return numpy.clip(w * h, 0, synapse_info.n_pre_neurons)

     @overrides(AbstractConnector.get_weight_maximum)
@@ -306,7 +293,7 @@ def get_connected_vertices(self, s_info, source_vertex, target_vertex):
         pre_vertex_in_post_layer_upper_left = pre_vertex_in_post_layer[:, 0]
         pre_vertex_in_post_layer_lower_right = pre_vertex_in_post_layer[:, 1]

-        kernel_shape = numpy.array(self.__kernel_shape)
+        kernel_shape = numpy.array(self.__kernel_weights.shape)

         j = (kernel_shape - 1 - start_i) // self.__strides
         j_upper_left = j[:, 0]
@@ -345,9 +332,7 @@ def get_connected_vertices(self, s_info, source_vertex, target_vertex):
         return connected

     def get_max_n_incoming_slices(self, source_vertex, target_vertex):
-        pre_vertices = numpy.array(
-            source_vertex.splitter.get_out_going_vertices(SPIKE_PARTITION_ID))
-        pre_slices = [m_vertex.vertex_slice for m_vertex in pre_vertices]
+        pre_slices = list(source_vertex.splitter.get_out_going_slices())
         pre_slices_x = [vtx_slice.get_slice(0) for vtx_slice in pre_slices]
         pre_slices_y = [vtx_slice.get_slice(1) for vtx_slice in pre_slices]
         pre_ranges = [[[py.start, px.start], [py.stop - 1, px.stop - 1]]
@@ -357,7 +342,7 @@ def get_max_n_incoming_slices(self, source_vertex, target_vertex):
         pre_vertex_in_post_layer_upper_left = pre_vertex_in_post_layer[:, 0]
         pre_vertex_in_post_layer_lower_right = pre_vertex_in_post_layer[:, 1]

-        kernel_shape = numpy.array(self.__kernel_shape)
+        kernel_shape = numpy.array(self.__kernel_weights.shape)

         j = (kernel_shape - 1 - start_i) // self.__strides
         j_upper_left = j[:, 0]
@@ -372,7 +357,7 @@ def get_max_n_incoming_slices(self, source_vertex, target_vertex):
             post_slice_x = post_slice.get_slice(0)
             post_slice_y = post_slice.get_slice(1)

-            # Get ranges allowed in post vertex
+            # Get ranges allowed in post
             min_x = post_slice_x.start
             max_x = post_slice_x.stop - 1
             min_y = post_slice_y.start
@@ -387,9 +372,8 @@ def get_max_n_incoming_slices(self, source_vertex, target_vertex):
                 numpy.any(pre_vertex_max_reach_in_post_layer_lower_right <
                           [min_y, min_x], axis=1))
             # When both things are true, we have a vertex in range
-            pre_in_range = pre_vertices[
-                numpy.logical_and(start_in_range, end_in_range)]
-            n_connected = len(pre_in_range)
+            pre_in_range = numpy.logical_and(start_in_range, end_in_range)
+            n_connected = pre_in_range.sum()
             max_connected = max(max_connected, n_connected)

         return max_connected
@@ -467,28 +451,15 @@ def get_local_only_data(
             self.__recip(ps_y), self.__recip(ps_x),
             pos_synapse_type, neg_synapse_type], dtype="uint16")

-        # Write delay
+        # Work out delay
        delay_step = (delay *
                      SpynnakerDataView.get_simulation_time_step_per_ms())
        local_delay = (delay_step %
                       app_edge.post_vertex.splitter.max_support_delay())

-        if self.__num_multisynaptic_connections == 1:
-            delay_view = numpy.array([local_delay], dtype="uint32")
-        else:
-            delay_range = local_delay - self.__multisynaptic_delay_min
-            delay_step = (delay_range /
-                          (self.__num_multisynaptic_connections - 1))
-            delay_view = numpy.array(
-                [local_delay, delay_step], dtype="uint16").view("uint32")
-
         # Compile all values
         data = [numpy.array(values, dtype="uint32"),
                 short_values.view("uint32"),
-                delay_view,
-                numpy.array([self.__num_multisynaptic_connections],
-                            dtype="uint32"),
-                numpy.array([weight_index], dtype="uint32")]
+                numpy.array([local_delay, weight_index], dtype="uint32")]
         return data

     def get_encoded_kernel_weights(self, app_edge, weight_scales):
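For context on the deleted branch: the multisynaptic path packed two 16-bit values (the starting delay and the per-connection delay step) into a single 32-bit connector word via numpy's view(), matching the union that the old C struct declared. A minimal sketch of that packing trick, with illustrative values rather than values from the PR:

    import numpy

    local_delay, delay_step = 16, 2  # illustrative values only
    packed = numpy.array([local_delay, delay_step], dtype="uint16").view("uint32")
    # On a little-endian host the first uint16 occupies the low half-word:
    assert packed[0] == (delay_step << 16) | local_delay

The replacement path is simpler: local_delay and weight_index are written as two plain uint32 words, which is exactly what the reduced connector struct (uint32_t delay; uint32_t kernel_index;) now reads back.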