Skip to content

Commit

Permalink
Merge pull request #138 from kklurz/main
Browse files Browse the repository at this point in the history
Last fixes before publication
  • Loading branch information
KonstantinWilleke authored Nov 18, 2021
2 parents 50f721c + 69fef54 commit 6b732af
Show file tree
Hide file tree
Showing 4 changed files with 11 additions and 6 deletions.
12 changes: 8 additions & 4 deletions neuralpredictors/layers/cores/conv2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,10 +146,14 @@ def __init__(
self.linear = linear

if depth_separable:
self.conv_layer_name = "ds_conv"
self.ConvLayer = DepthSeparableConv2d
elif attention_conv:
# TODO: check if name attention_conv is backwards compatible
self.conv_layer_name = "attention_conv"
self.ConvLayer = self.AttentionConvWrapper
else:
self.conv_layer_name = "conv"
self.ConvLayer = nn.Conv2d

self.set_batchnorm_type()
Expand Down Expand Up @@ -202,7 +206,7 @@ def add_subsequent_layers(self):
layer = OrderedDict()
if self.hidden_padding is None:
self.hidden_padding = ((self.hidden_kern[l - 1] - 1) * self.hidden_dilation + 1) // 2
layer["conv"] = self.ConvLayer(
layer[self.conv_layer_name] = self.ConvLayer(
in_channels=self.hidden_channels if not self.skip > 1 else min(self.skip, l) * self.hidden_channels,
out_channels=self.hidden_channels,
kernel_size=self.hidden_kern[l - 1],
Expand Down Expand Up @@ -332,7 +336,7 @@ def set_batchnorm_type(self):

def add_first_layer(self):
layer = OrderedDict()
layer["conv"] = HermiteConv2D(
layer["hermite_conv"] = HermiteConv2D(
input_features=self.input_channels,
output_features=self.hidden_channels,
num_rotations=self.num_rotations,
Expand All @@ -356,7 +360,7 @@ def add_subsequent_layers(self):
if self.hidden_padding is None:
self.hidden_padding = self.hidden_kern[l - 1] // 2

layer["conv"] = HermiteConv2D(
layer["hermite_conv"] = HermiteConv2D(
input_features=self.hidden_channels * self.num_rotations,
output_features=self.hidden_channels,
num_rotations=self.num_rotations,
Expand Down Expand Up @@ -566,7 +570,7 @@ def add_subsequent_layers(self):
layer = OrderedDict()
if self.hidden_padding is None:
self.hidden_padding = ((self.hidden_kern[l - 1] - 1) * self.hidden_dilation + 1) // 2
layer["conv"] = self.ConvLayer(
layer[self.conv_layer_name] = self.ConvLayer(
in_channels=self.hidden_channels if not self.skip > 1 else min(self.skip, l) * self.hidden_channels,
out_channels=self.hidden_channels,
kernel_size=self.hidden_kern[l - 1],
Expand Down
2 changes: 1 addition & 1 deletion neuralpredictors/layers/encoders/firing_rate.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@


class FiringRateEncoder(nn.Module):
def __init__(self, core, readout, shifter=None, modulator=None, elu_offset=0.0):
def __init__(self, core, readout, *, shifter=None, modulator=None, elu_offset=0.0):
"""
        An Encoder that wraps the core, readout and optionally a shifter and modulator into one model.
The output is one positive value that can be interpreted as a firing rate, for example for a Poisson distribution.
Expand Down
1 change: 1 addition & 0 deletions neuralpredictors/measures/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .np_functions import corr
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
numpy
numpy>=1.20
torch
tqdm
pandas
Expand Down

0 comments on commit 6b732af

Please sign in to comment.