Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Expose num #576

Merged
merged 7 commits into from
Mar 14, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,15 @@
**/*.suo
**/*.sdf
**/*.vcxproj.user
**/*.vcxproj.filters
**/*.opensdf
**/x64
**/.vs
**/*.ipdb
**/*.iobj
**/*.testdurations

/dist
/lib
/documentation/
/build/
Expand Down
39 changes: 25 additions & 14 deletions src/genn/genn/code_generator/customUpdateGroupMerged.cc
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,9 @@ boost::uuids::detail::sha1::digest_type CustomUpdateGroupMerged::getHashDigest()
//----------------------------------------------------------------------------
void CustomUpdateGroupMerged::generateCustomUpdate(const BackendBase&, CodeStream &os, const ModelSpecMerged &modelMerged, Substitutions &popSubs) const
{
popSubs.addVarSubstitution("num_batch", std::to_string(getArchetype().isBatched() ? modelMerged.getModel().getBatchSize() : 1));
popSubs.addVarSubstitution("num", "group->size");

genCustomUpdate(os, popSubs, *this, modelMerged, "id",
[this](const Models::VarReference &varRef, const std::string &index)
{
Expand Down Expand Up @@ -272,6 +275,20 @@ CustomUpdateWUGroupMergedBase::CustomUpdateWUGroupMergedBase(size_t index, const
const std::vector<std::reference_wrapper<const CustomUpdateWUInternal>> &groups)
: GroupMerged<CustomUpdateWUInternal>(index, precision, groups)
{
addField("unsigned int", "numSrcNeurons",
[](const CustomUpdateWUInternal &cg, size_t)
{
const SynapseGroupInternal *sgInternal = static_cast<const SynapseGroupInternal*>(cg.getSynapseGroup());
return std::to_string(sgInternal->getSrcNeuronGroup()->getNumNeurons());
});

addField("unsigned int", "numTrgNeurons",
[](const CustomUpdateWUInternal &cg, size_t)
{
const SynapseGroupInternal *sgInternal = static_cast<const SynapseGroupInternal*>(cg.getSynapseGroup());
return std::to_string(sgInternal->getTrgNeuronGroup()->getNumNeurons());
});

// If underlying synapse group has kernel weights
if (getArchetype().getSynapseGroup()->getMatrixType() & SynapseMatrixWeight::KERNEL) {
// Loop through kernel size dimensions
Expand All @@ -294,20 +311,6 @@ CustomUpdateWUGroupMergedBase::CustomUpdateWUGroupMergedBase(size_t index, const
const SynapseGroupInternal *sgInternal = static_cast<const SynapseGroupInternal*>(cg.getSynapseGroup());
return std::to_string(backend.getSynapticMatrixRowStride(*sgInternal));
});

addField("unsigned int", "numSrcNeurons",
[](const CustomUpdateWUInternal &cg, size_t)
{
const SynapseGroupInternal *sgInternal = static_cast<const SynapseGroupInternal*>(cg.getSynapseGroup());
return std::to_string(sgInternal->getSrcNeuronGroup()->getNumNeurons());
});

addField("unsigned int", "numTrgNeurons",
[](const CustomUpdateWUInternal &cg, size_t)
{
const SynapseGroupInternal *sgInternal = static_cast<const SynapseGroupInternal*>(cg.getSynapseGroup());
return std::to_string(sgInternal->getTrgNeuronGroup()->getNumNeurons());
});

// If synapse group has sparse connectivity
if(getArchetype().getSynapseGroup()->getMatrixType() & SynapseMatrixConnectivity::SPARSE) {
Expand Down Expand Up @@ -370,6 +373,10 @@ const std::string CustomUpdateWUGroupMerged::name = "CustomUpdateWU";
//----------------------------------------------------------------------------
void CustomUpdateWUGroupMerged::generateCustomUpdate(const BackendBase&, CodeStream &os, const ModelSpecMerged &modelMerged, Substitutions &popSubs) const
{
popSubs.addVarSubstitution("num_batch", std::to_string(getArchetype().isBatched() ? modelMerged.getModel().getBatchSize() : 1));
popSubs.addVarSubstitution("num_pre", "group->numSrcNeurons");
popSubs.addVarSubstitution("num_post", "group->numTrgNeurons");

genCustomUpdate(os, popSubs, *this, modelMerged, "id_syn",
[this, &modelMerged](const Models::WUVarReference &varRef, const std::string &index)
{
Expand All @@ -385,6 +392,10 @@ const std::string CustomUpdateTransposeWUGroupMerged::name = "CustomUpdateTransp
//----------------------------------------------------------------------------
void CustomUpdateTransposeWUGroupMerged::generateCustomUpdate(const BackendBase&, CodeStream &os, const ModelSpecMerged &modelMerged, Substitutions &popSubs) const
{
popSubs.addVarSubstitution("num_batch", std::to_string(getArchetype().isBatched() ? modelMerged.getModel().getBatchSize() : 1));
popSubs.addVarSubstitution("num_pre", "group->numSrcNeurons");
popSubs.addVarSubstitution("num_post", "group->numTrgNeurons");

genCustomUpdate(os, popSubs, *this, modelMerged, "id_syn",
[this, &modelMerged](const Models::WUVarReference &varRef, const std::string &index)
{
Expand Down
11 changes: 6 additions & 5 deletions src/genn/genn/code_generator/groupMerged.cc
Original file line number Diff line number Diff line change
Expand Up @@ -862,14 +862,15 @@ SynapseGroupMergedBase::SynapseGroupMergedBase(size_t index, const std::string &
|| (role == Role::SynapseDynamics));
const WeightUpdateModels::Base *wum = getArchetype().getWUModel();

addField("unsigned int", "numSrcNeurons",
[](const SynapseGroupInternal &sg, size_t) { return std::to_string(sg.getSrcNeuronGroup()->getNumNeurons()); });
addField("unsigned int", "numTrgNeurons",
[](const SynapseGroupInternal &sg, size_t) { return std::to_string(sg.getTrgNeuronGroup()->getNumNeurons()); });

// If role isn't an init role or weights aren't kernel
if(role != Role::Init || !(getArchetype().getMatrixType() & SynapseMatrixWeight::KERNEL)) {
if (role != Role::Init || !(getArchetype().getMatrixType() & SynapseMatrixWeight::KERNEL)) {
addField("unsigned int", "rowStride",
[&backend](const SynapseGroupInternal &sg, size_t) { return std::to_string(backend.getSynapticMatrixRowStride(sg)); });
addField("unsigned int", "numSrcNeurons",
[](const SynapseGroupInternal &sg, size_t) { return std::to_string(sg.getSrcNeuronGroup()->getNumNeurons()); });
addField("unsigned int", "numTrgNeurons",
[](const SynapseGroupInternal &sg, size_t) { return std::to_string(sg.getTrgNeuronGroup()->getNumNeurons()); });
}

if(role == Role::PostsynapticUpdate || role == Role::SparseInit) {
Expand Down
44 changes: 26 additions & 18 deletions src/genn/genn/code_generator/initGroupMerged.cc
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,8 @@ void genInitNeuronVarCode(CodeStream &os, const BackendBase &backend, const Subs
CodeStream::Scope b(os);

Substitutions varSubs(&popSubs);
varSubs.addVarSubstitution("num_batch", std::to_string(batchSize));
varSubs.addVarSubstitution("num", count);

// Substitute in parameters and derived parameters for initialising variables
varSubs.addParamValueSubstitution(varInit.getSnippet()->getParamNames(), varInit.getParams(),
Expand Down Expand Up @@ -147,31 +149,36 @@ void genInitWUVarCode(CodeStream &os, const Substitutions &popSubs,
// If this variable has any initialisation code and doesn't require a kernel
if(!varInit.getSnippet()->getCode().empty() && !varInit.getSnippet()->requiresKernel()) {
CodeStream::Scope b(os);

Substitutions varSubs(&popSubs);
varSubs.addVarSubstitution("num_batch", std::to_string(batchSize));
varSubs.addVarSubstitution("num_pre", "group->numSrcNeurons");
varSubs.addVarSubstitution("num_post", "group->numTrgNeurons");

// Generate target-specific code to initialise variable
genSynapseVariableRowInitFn(os, popSubs,
genSynapseVariableRowInitFn(os, varSubs,
[&vars, &varInit, &ftype, &stride, batchSize, k, groupIndex, isParamHeterogeneousFn, isDerivedParamHeterogeneousFn]
(CodeStream &os, Substitutions &varSubs)
(CodeStream &os, Substitutions &varInitSubs)
{
varSubs.addParamValueSubstitution(varInit.getSnippet()->getParamNames(), varInit.getParams(),
[k, isParamHeterogeneousFn](size_t p) { return isParamHeterogeneousFn(k, p); },
"", "group->", vars[k].name);
varSubs.addVarValueSubstitution(varInit.getSnippet()->getDerivedParams(), varInit.getDerivedParams(),
[k, isDerivedParamHeterogeneousFn](size_t p) { return isDerivedParamHeterogeneousFn(k, p); },
"", "group->", vars[k].name);
varSubs.addVarNameSubstitution(varInit.getSnippet()->getExtraGlobalParams(),
"", "group->", vars[k].name);
varInitSubs.addParamValueSubstitution(varInit.getSnippet()->getParamNames(), varInit.getParams(),
[k, isParamHeterogeneousFn](size_t p) { return isParamHeterogeneousFn(k, p); },
"", "group->", vars[k].name);
varInitSubs.addVarValueSubstitution(varInit.getSnippet()->getDerivedParams(), varInit.getDerivedParams(),
[k, isDerivedParamHeterogeneousFn](size_t p) { return isDerivedParamHeterogeneousFn(k, p); },
"", "group->", vars[k].name);
varInitSubs.addVarNameSubstitution(varInit.getSnippet()->getExtraGlobalParams(),
"", "group->", vars[k].name);

// Generate initial value into temporary variable
os << vars[k].type << " initVal;" << std::endl;
varSubs.addVarSubstitution("value", "initVal");
varInitSubs.addVarSubstitution("value", "initVal");
std::string code = varInit.getSnippet()->getCode();
varSubs.applyCheckUnreplaced(code, "initVar : merged" + vars[k].name + std::to_string(groupIndex));
varInitSubs.applyCheckUnreplaced(code, "initVar : merged" + vars[k].name + std::to_string(groupIndex));
code = ensureFtype(code, ftype);
os << code << std::endl;

// Fill value across all batches
genVariableFill(os, vars[k].name, "initVal", varSubs["id_syn"], stride,
genVariableFill(os, vars[k].name, "initVal", varInitSubs["id_syn"], stride,
getVarAccessDuplication(vars[k].access), batchSize);
});
}
Expand Down Expand Up @@ -333,7 +340,7 @@ void NeuronInitGroupMerged::generateInit(const BackendBase &backend, CodeStream
os << "*group->spkQuePtr = 0;" << std::endl;
});
}

// Initialise neuron variables
genInitNeuronVarCode(os, backend, popSubs, getArchetype().getNeuronModel()->getVars(), getArchetype().getVarInitialisers(),
"", "numNeurons", getArchetype().getNumDelaySlots(), getIndex(), model.getPrecision(), model.getBatchSize(),
Expand Down Expand Up @@ -780,6 +787,11 @@ CustomWUUpdateInitGroupMerged::CustomWUUpdateInitGroupMerged(size_t index, const
const std::vector<std::reference_wrapper<const CustomUpdateWUInternal>> &groups)
: CustomUpdateInitGroupMergedBase<CustomUpdateWUInternal>(index, precision, backend, groups)
{
addField("unsigned int", "numSrcNeurons",
[](const CustomUpdateWUInternal &cg, size_t) { return std::to_string(cg.getSynapseGroup()->getSrcNeuronGroup()->getNumNeurons()); });
addField("unsigned int", "numTrgNeurons",
[](const CustomUpdateWUInternal &cg, size_t) { return std::to_string(cg.getSynapseGroup()->getTrgNeuronGroup()->getNumNeurons()); });

if(getArchetype().getSynapseGroup()->getMatrixType() & SynapseMatrixWeight::KERNEL) {
// Loop through kernel size dimensions
for (size_t d = 0; d < getArchetype().getSynapseGroup()->getKernelSize().size(); d++) {
Expand All @@ -793,10 +805,6 @@ CustomWUUpdateInitGroupMerged::CustomWUUpdateInitGroupMerged(size_t index, const
else {
addField("unsigned int", "rowStride",
[&backend](const CustomUpdateWUInternal &cg, size_t) { return std::to_string(backend.getSynapticMatrixRowStride(*cg.getSynapseGroup())); });
addField("unsigned int", "numSrcNeurons",
[](const CustomUpdateWUInternal &cg, size_t) { return std::to_string(cg.getSynapseGroup()->getSrcNeuronGroup()->getNumNeurons()); });
addField("unsigned int", "numTrgNeurons",
[](const CustomUpdateWUInternal &cg, size_t) { return std::to_string(cg.getSynapseGroup()->getTrgNeuronGroup()->getNumNeurons()); });
}
}
//----------------------------------------------------------------------------
Expand Down
3 changes: 3 additions & 0 deletions src/genn/genn/code_generator/neuronUpdateGroupMerged.cc
Original file line number Diff line number Diff line change
Expand Up @@ -182,6 +182,9 @@ void NeuronUpdateGroupMerged::generateNeuronUpdate(const BackendBase &backend, C
const unsigned int batchSize = model.getBatchSize();
const NeuronModels::Base *nm = getArchetype().getNeuronModel();

popSubs.addVarSubstitution("num_batch", std::to_string(batchSize));
popSubs.addVarSubstitution("num", "group->numNeurons");

// Generate code to copy neuron state into local variable
for(const auto &v : nm->getVars()) {
if(v.access & VarAccessMode::READ_ONLY) {
Expand Down
3 changes: 3 additions & 0 deletions src/genn/genn/code_generator/synapseUpdateGroupMerged.cc
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@ void applySynapseSubstitutions(CodeStream &os, std::string code, const std::stri
const auto *wu = sg.getArchetype().getWUModel();

Substitutions synapseSubs(&baseSubs);
synapseSubs.addVarSubstitution("num_batch", std::to_string(batchSize));
synapseSubs.addVarSubstitution("num_pre", "group->numSrcNeurons");
synapseSubs.addVarSubstitution("num_post", "group->numTrgNeurons");

// Substitute parameter and derived parameter names
synapseSubs.addParamValueSubstitution(wu->getParamNames(), sg.getArchetype().getWUParams(),
Expand Down
148 changes: 148 additions & 0 deletions tests/features/num/model.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
//--------------------------------------------------------------------------
/*! \file num/model.cc

\brief model definition file that is part of the feature testing
suite of minimal models with known analytic outcomes that are used for continuous integration testing.
*/
//--------------------------------------------------------------------------


#include "modelSpec.h"

//----------------------------------------------------------------------------
// Neuron
//----------------------------------------------------------------------------
// Feature-test neuron model: on every simulation step it copies the
// code-generator-provided $(num) (population size) and $(num_batch)
// (model batch size) substitutions into state variables so the test
// harness can read them back and verify the values are exposed correctly.
class Neuron : public NeuronModels::Base
{
public:
// 0 parameters, 2 state variables (see SET_VARS below)
DECLARE_MODEL(Neuron, 0, 2);

SET_VARS({{"num_test", "unsigned int"}, {"num_batch_test", "unsigned int"}});

// Capture the substituted values each timestep.
SET_SIM_CODE(
"$(num_test)= $(num);\n"
"$(num_batch_test) = $(num_batch);\n");
};
IMPLEMENT_MODEL(Neuron);

//----------------------------------------------------------------------------
// PSM
//----------------------------------------------------------------------------
// Feature-test postsynaptic model: its decay code records $(num) and
// $(num_batch) into per-neuron state variables so the test can verify
// the substitutions are available in postsynaptic-model code.
class PSM : public PostsynapticModels::Base
{
public:
// 0 parameters, 2 state variables
DECLARE_MODEL(PSM, 0, 2);

SET_VARS({{"num_psm_test", "unsigned int"}, {"num_batch_psm_test", "unsigned int"}});

// Capture the substituted values during postsynaptic decay.
SET_DECAY_CODE(
"$(num_psm_test)= $(num);\n"
"$(num_batch_psm_test) = $(num_batch);\n");
};
IMPLEMENT_MODEL(PSM);

//----------------------------------------------------------------------------
// CS
//----------------------------------------------------------------------------
// Feature-test current source: its injection code records $(num) and
// $(num_batch) into state variables so the test can verify the
// substitutions are available in current-source code.
class CS : public CurrentSourceModels::Base
{
public:
// 0 parameters, 2 state variables
DECLARE_MODEL(CS, 0, 2);

SET_VARS({{"num_cs_test", "unsigned int"}, {"num_batch_cs_test", "unsigned int"}});

// Capture the substituted values during current injection.
SET_INJECTION_CODE(
"$(num_cs_test)= $(num);\n"
"$(num_batch_cs_test) = $(num_batch);\n");
};
IMPLEMENT_MODEL(CS);

//----------------------------------------------------------------------------
// WUM
//----------------------------------------------------------------------------
// Feature-test weight-update model exercising all three code contexts:
//  - synapse dynamics: records $(num_pre), $(num_post) and $(num_batch)
//    into per-synapse variables;
//  - pre/post dynamics: records $(num) and $(num_batch) into
//    per-pre-neuron / per-post-neuron variables.
class WUM : public WeightUpdateModels::Base
{
public:
// 0 parameters, 3 synapse vars, 2 presynaptic vars, 2 postsynaptic vars
DECLARE_WEIGHT_UPDATE_MODEL(WUM, 0, 3, 2, 2);

SET_VARS({{"num_pre_wum_syn_test", "unsigned int"}, {"num_post_wum_syn_test", "unsigned int"}, {"num_batch_wum_syn_test", "unsigned int"}});
SET_PRE_VARS({{"num_wum_pre_test", "unsigned int"}, {"num_batch_wum_pre_test", "unsigned int"}});
SET_POST_VARS({{"num_wum_post_test", "unsigned int"}, {"num_batch_wum_post_test", "unsigned int"}});

// Per-synapse capture of source/target population sizes and batch size.
SET_SYNAPSE_DYNAMICS_CODE(
"$(num_pre_wum_syn_test)= $(num_pre);\n"
"$(num_post_wum_syn_test)= $(num_post);\n"
"$(num_batch_wum_syn_test) = $(num_batch);\n");
// Per-presynaptic-neuron capture.
SET_PRE_DYNAMICS_CODE(
"$(num_wum_pre_test)= $(num);\n"
"$(num_batch_wum_pre_test) = $(num_batch);\n");
// Per-postsynaptic-neuron capture.
SET_POST_DYNAMICS_CODE(
"$(num_wum_post_test)= $(num);\n"
"$(num_batch_wum_post_test) = $(num_batch);\n");
};
IMPLEMENT_MODEL(WUM);

//----------------------------------------------------------------------------
// CU
//----------------------------------------------------------------------------
// Feature-test custom-update model (per-neuron): records $(num) and
// $(num_batch) during the custom update so the test can verify the
// substitutions are available in custom-update code.
// NOTE(review): unlike the other models in this file there is no
// "public:" here, so members default to private — presumably the
// DECLARE_/SET_ macros make the required members accessible; confirm
// against the macro definitions in the GeNN headers.
class CU : public CustomUpdateModels::Base
{
// 0 parameters, 2 state variables, 1 variable reference
DECLARE_CUSTOM_UPDATE_MODEL(CU, 0, 2, 1);

SET_VARS({{"num_test", "unsigned int"}, {"num_batch_test", "unsigned int"}});
SET_VAR_REFS({{"ref", "unsigned int", VarAccessMode::READ_ONLY}});

// Capture the substituted values when the update runs.
SET_UPDATE_CODE(
"$(num_test)= $(num);\n"
"$(num_batch_test) = $(num_batch);\n");
};
IMPLEMENT_MODEL(CU);

//----------------------------------------------------------------------------
// CUWUM
//----------------------------------------------------------------------------
// Feature-test custom-update model operating on weight-update (per-synapse)
// variables: records $(num_pre), $(num_post) and $(num_batch) so the test
// can verify these substitutions are available in WU custom-update code.
// NOTE(review): no "public:" (see CU above) — assumed intentional, matching
// the macro-based declaration style; confirm against the macro definitions.
class CUWUM : public CustomUpdateModels::Base
{
// 0 parameters, 3 state variables, 1 variable reference
DECLARE_CUSTOM_UPDATE_MODEL(CUWUM, 0, 3, 1);

SET_VARS({{"num_pre_test", "unsigned int"}, {"num_post_test", "unsigned int"}, {"num_batch_test", "unsigned int"}});
SET_VAR_REFS({{"ref", "unsigned int", VarAccessMode::READ_ONLY}});

// Capture the substituted values when the update runs.
SET_UPDATE_CODE(
"$(num_pre_test)= $(num_pre);\n"
"$(num_post_test)= $(num_post);\n"
"$(num_batch_test) = $(num_batch);\n");
};
IMPLEMENT_MODEL(CUWUM);

// Build the "num" feature-test network: a 2-neuron spike source driving a
// 4-neuron population through a dense synapse group, plus a current source
// and two custom updates, so that every code context which should receive
// the $(num)/$(num_pre)/$(num_post)/$(num_batch) substitutions is exercised.
void modelDefinition(ModelSpec &model)
{
#ifdef CL_HPP_TARGET_OPENCL_VERSION
    // Allow CI to pin the OpenCL device/platform via environment variables.
    const char *oclDevice = std::getenv("OPENCL_DEVICE");
    if(oclDevice != nullptr) {
        GENN_PREFERENCES.deviceSelectMethod = DeviceSelect::MANUAL;
        GENN_PREFERENCES.manualDeviceID = std::atoi(oclDevice);
    }
    const char *oclPlatform = std::getenv("OPENCL_PLATFORM");
    if(oclPlatform != nullptr) {
        GENN_PREFERENCES.manualPlatformID = std::atoi(oclPlatform);
    }
#endif
    model.setDT(1.0);
    model.setName("num");

    // Populations: 2 presynaptic spike sources, 4 postsynaptic test neurons.
    model.addNeuronPopulation<NeuronModels::SpikeSource>("Pre", 2, {}, {});
    auto *postPop = model.addNeuronPopulation<Neuron>("Post", 4, {}, {0, 0});

    // Current source attached to the postsynaptic population.
    model.addCurrentSource<CS>("CurrSource", "Post", {}, {0, 0});

    // Dense synapse group exercising WU and postsynaptic model code.
    auto *synPop = model.addSynapsePopulation<WUM, PSM>(
        "Syn", SynapseMatrixType::DENSE_INDIVIDUALG, NO_DELAY, "Pre", "Post",
        {}, {0, 0, 0}, {0, 0}, {0, 0},
        {}, {0, 0});

    // Per-neuron custom update referencing a Post variable.
    CU::VarReferences cuVarRefs(createVarRef(postPop, "num_test")); // ref
    model.addCustomUpdate<CU>("CU", "Test", {}, {0, 0}, cuVarRefs);

    // Per-synapse (WU) custom update referencing a synapse variable.
    CUWUM::WUVarReferences cuwumVarRefs(createWUVarRef(synPop, "num_pre_wum_syn_test")); // R
    model.addCustomUpdate<CUWUM>("CUWM", "Test", {}, {0, 0, 0}, cuwumVarRefs);

    model.setPrecision(GENN_FLOAT);
}
Loading