Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix multiple issues with sparse synapse index narrowing #460

Merged
merged 4 commits into from
Sep 22, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 7 additions & 4 deletions include/genn/genn/synapseGroup.h
Original file line number Diff line number Diff line change
Expand Up @@ -142,8 +142,14 @@ class GENN_EXPORT SynapseGroup
//! Is this synapse group a weight-sharing slave
bool isWeightSharingSlave() const { return (getWeightSharingMaster() != nullptr); }

//! Has this synapse group's postsynaptic model been merged with others
/*! NOTE: this can only be called after model is finalized but needs to be public for PyGeNN */
bool isPSModelMerged() const{ return m_PSModelTargetName != getName(); }

//! Get the type to use for sparse connectivity indices for synapse group
/*! NOTE: this can only be called after model is finalized but needs to be public for PyGeNN */
std::string getSparseIndType() const;

const WeightUpdateModels::Base *getWUModel() const{ return m_WUModel; }

const std::vector<double> &getWUParams() const{ return m_WUParams; }
Expand Down Expand Up @@ -283,10 +289,7 @@ class GENN_EXPORT SynapseGroup
/*! This is required when the pre-synaptic neuron population's outgoing synapse groups require different event threshold */
bool isEventThresholdReTestRequired() const{ return m_EventThresholdReTestRequired; }

const std::string &getPSModelTargetName() const{ return m_PSModelTargetName; }

//! Get the type to use for sparse connectivity indices for synapse group
std::string getSparseIndType() const;
const std::string &getPSModelTargetName() const{ return m_PSModelTargetName; }

//! Are any of this synapse group's weight update model variables referenced by a custom update
bool areWUVarReferencedByCustomUpdate() const { return m_WUVarReferencedByCustomUpdate; }
Expand Down
2 changes: 1 addition & 1 deletion pygenn/genn_groups.py
Original file line number Diff line number Diff line change
Expand Up @@ -1050,7 +1050,7 @@ def load(self):
# Get pointers to ragged data structure members
ind = self._assign_ext_ptr_array("ind",
self.weight_update_var_size,
"unsigned int")
self.pop.get_sparse_ind_type())
row_length = self._assign_ext_ptr_array("rowLength",
self.src.size,
"unsigned int")
Expand Down
2 changes: 1 addition & 1 deletion src/genn/genn/code_generator/generateRunner.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1228,7 +1228,7 @@ MemAlloc CodeGenerator::generateRunner(const filesystem::path &outputPath, const
s.second.getSparseConnectivityLocation(), autoInitialized, s.second.getSrcNeuronGroup()->getNumNeurons());

// Target indices
backend.genVariablePushPull(runnerPushFunc, runnerPullFunc, "unsigned int", "ind" + s.second.getName(),
backend.genVariablePushPull(runnerPushFunc, runnerPullFunc, s.second.getSparseIndType(), "ind" + s.second.getName(),
s.second.getSparseConnectivityLocation(), autoInitialized, size);
});
}
Expand Down
39 changes: 19 additions & 20 deletions src/genn/genn/synapseGroup.cc
Original file line number Diff line number Diff line change
Expand Up @@ -238,6 +238,25 @@ bool SynapseGroup::isSpikeEventRequired() const
return !getWUModel()->getEventCode().empty();
}
//----------------------------------------------------------------------------
std::string SynapseGroup::getSparseIndType() const
{
    // Narrowing disabled - always use full-width 32-bit indices
    if(!m_NarrowSparseIndEnabled) {
        return "uint32_t";
    }

    // Otherwise, select the smallest unsigned type wide enough to index
    // every neuron in the target population
    const unsigned int numTrg = getTrgNeuronGroup()->getNumNeurons();
    if(numTrg <= std::numeric_limits<uint8_t>::max()) {
        return "uint8_t";
    }
    return (numTrg <= std::numeric_limits<uint16_t>::max()) ? "uint16_t" : "uint32_t";
}
//----------------------------------------------------------------------------
const std::vector<double> SynapseGroup::getWUConstInitVals() const
{
return getConstInitVals(m_WUVarInitialisers);
Expand Down Expand Up @@ -590,26 +609,6 @@ void SynapseGroup::initDerivedParams(double dt)
m_ConnectivityInitialiser.initDerivedParams(dt);
}
//----------------------------------------------------------------------------
std::string SynapseGroup::getSparseIndType() const
{
    // Default to full-width 32-bit indices
    std::string indType = "uint32_t";

    // When narrowing is enabled, shrink to the smallest unsigned type that
    // can represent every index into the target neuron population
    if(m_NarrowSparseIndEnabled) {
        const unsigned int numTargetNeurons = getTrgNeuronGroup()->getNumNeurons();
        if(numTargetNeurons <= std::numeric_limits<uint8_t>::max()) {
            indType = "uint8_t";
        }
        else if(numTargetNeurons <= std::numeric_limits<uint16_t>::max()) {
            indType = "uint16_t";
        }
    }

    return indType;
}
//----------------------------------------------------------------------------
bool SynapseGroup::canPSBeLinearlyCombined() const
{
// Return true if there are no variables or extra global parameters
Expand Down
1 change: 1 addition & 0 deletions tests/features/connect_init/model.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ void modelDefinition(ModelSpec &model)
#endif
model.setDT(0.1);
model.setName("connect_init");
model.setDefaultNarrowSparseIndEnabled(true);

NeuronModels::LIF::ParamValues lifParams(
0.25, // 0 - C
Expand Down
4 changes: 2 additions & 2 deletions tests/features/connect_init/test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,8 @@ class SimTest : public SimulationTest
{
};

template<size_t N>
void calcHistogram(const unsigned int *rowLength, const uint32_t *ind,
template<size_t N, typename I>
void calcHistogram(const unsigned int *rowLength, const I *ind,
unsigned int maxRowLength, std::array<unsigned int, N> &histogram)
{
// Loop through rows
Expand Down