-
Notifications
You must be signed in to change notification settings - Fork 45
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Ring Buffer left shift preserves smallest weight #745
base: master
Are you sure you want to change the base?
Changes from all commits
b6dab81
a73821e
7f13025
2810213
0798b0c
3fc8879
9193015
15e5a0c
cc11b09
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -188,23 +188,23 @@ def get_n_connections_to_post_vertex_maximum(self, synapse_info): | |
@overrides(AbstractConnector.get_weight_mean) | ||
def get_weight_mean(self, weights): | ||
if self.__weights is None: | ||
return numpy.mean(weights) | ||
return super(FromListConnector, self).get_weight_mean(weights) | ||
else: | ||
return numpy.mean(numpy.abs(self.__weights)) | ||
|
||
@overrides(AbstractConnector.get_weight_maximum) | ||
def get_weight_maximum(self, synapse_info): | ||
# pylint: disable=too-many-arguments | ||
if self.__weights is None: | ||
return numpy.amax(synapse_info.weights) | ||
return self._get_weight_maximum(self.__weights, len(self.__conn_list)) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Line too long (flake8) |
||
else: | ||
return numpy.amax(numpy.abs(self.__weights)) | ||
|
||
@overrides(AbstractConnector.get_weight_variance) | ||
def get_weight_variance(self, weights): | ||
# pylint: disable=too-many-arguments | ||
if self.__weights is None: | ||
return numpy.var(weights) | ||
return super(FromListConnector, self).get_weight_variance(weights) | ||
else: | ||
return numpy.var(numpy.abs(self.__weights)) | ||
|
||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -446,6 +446,7 @@ def _get_ring_buffer_to_input_left_shifts( | |
weights_signed = False | ||
rate_stats = [RunningStats() for _ in range(n_synapse_types)] | ||
steps_per_second = 1000000.0 / machine_timestep | ||
min_max_weight = numpy.ones(n_synapse_types) * 2 ** 32 | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Note that this stores the "minimum of the max weights" rather than the "minimum weight". This is because the connector code does not include this. This could be added, but it should be noted that:
|
||
|
||
for app_edge in application_graph.get_edges_ending_at_vertex( | ||
application_vertex): | ||
|
@@ -472,7 +473,10 @@ def _get_ring_buffer_to_input_left_shifts( | |
0.0, delay_variance, n_connections) | ||
|
||
weight_max = (synapse_dynamics.get_weight_maximum( | ||
connector, synapse_info) * weight_scale) | ||
connector, synapse_info.weights) * weight_scale) | ||
min_max_weight[synapse_type] = \ | ||
min(min_max_weight[synapse_type], weight_max) | ||
|
||
biggest_weight[synapse_type] = max( | ||
biggest_weight[synapse_type], weight_max) | ||
|
||
|
@@ -520,24 +524,32 @@ def _get_ring_buffer_to_input_left_shifts( | |
total_weights[synapse_type]) | ||
max_weights[synapse_type] = max( | ||
max_weights[synapse_type], biggest_weight[synapse_type]) | ||
# This is to deal with very small weights that are floored to 0 | ||
mmw = 2**math.floor(math.log(min_max_weight[synapse_type], 2)) | ||
max_weights[synapse_type] = min(mmw * 2 ** 15, | ||
max_weights[synapse_type]) | ||
|
||
# Convert these to powers | ||
max_weight_powers = ( | ||
0 if w <= 0 else int(math.ceil(max(0, math.log(w, 2)))) | ||
0 if w <= 1 else int(math.ceil(max(0, math.log(w, 2)))) | ||
for w in max_weights) | ||
|
||
# If 2^max_weight_power equals the max weight, we have to add another | ||
# power, as range is 0 - (just under 2^max_weight_power)! | ||
max_weight_powers = ( | ||
w + 1 if (2 ** w) <= a else w | ||
w + 1 if (2 ** w) < a else w | ||
for w, a in zip(max_weight_powers, max_weights)) | ||
|
||
# If we have synapse dynamics that uses signed weights, | ||
# Add another bit of shift to prevent overflows | ||
if weights_signed: | ||
max_weight_powers = (m + 1 for m in max_weight_powers) | ||
|
||
return list(max_weight_powers) | ||
rb_ls = list(max_weight_powers) | ||
print("=" * 60) | ||
print("RB left shifts for {:20}".format(application_vertex.label), | ||
"=", rb_ls) | ||
print("-" * 60) | ||
return rb_ls | ||
Comment on lines
+547
to
+552
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. As mentioned, this printing should be removed. Instead it would make sense to have a report that tells the user what scaling was used and why (i.e. expected maximum weights and scaling values). There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This should also possibly warn the user up-front when weight scaling cannot be achieved due to minimum weight representation issues. This would then explain the overflow messages that are likely to be received later. Additionally, this code could detect and warn the user when it is impossible to represent both the minimum and maximum weight, suggesting the 16-bit weight is not enough to represent the dynamic range of values requested. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. As mentioned elsewhere, we don't have the minimum weight here yet, so this might be needed if this is to report the range between the true minimum and the maximum, rather than the minimum of the maximums. |
||
|
||
@staticmethod | ||
def _get_weight_scale(ring_buffer_to_input_left_shift): | ||
|
@@ -566,7 +578,6 @@ def _write_padding( | |
next_block_allowed_address = self.__poptable_type\ | ||
.get_next_allowed_address(next_block_start_address) | ||
if next_block_allowed_address != next_block_start_address: | ||
|
||
# Pad out data file with the added alignment bytes: | ||
spec.comment("\nWriting population table required padding\n") | ||
spec.switch_write_focus(synaptic_matrix_region) | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This might be a better value; I guess it depends on what the user provides as weight values. The idea here is for the weight to be more precise when using conductance values. For reference, conductance weight is in micro-Siemens but current weight is in nano-Amps; thus a scale of ~1000 has some justification. However, the effects of conductance are non-linear since they depend upon the membrane voltage, and so weights to get similar outputs tend to be relatively bigger than a simple division by 1000. Perhaps this scaling should be dynamic depending on the expected sum of weights, in the same way as is done for the weight scaling elsewhere.