From a6717abb97190ea27fcb6a58f10560498b0f5d08 Mon Sep 17 00:00:00 2001
From: Pooja Babu
Date: Fri, 26 Jan 2024 11:05:40 +0100
Subject: [PATCH] Modify templates

---
 .../printers/nest_gpu_variable_printer.py     |  2 +-
 .../point_neuron/@NEURON_NAME@.cu.jinja2      | 50 +++++++++++--------
 .../point_neuron/@NEURON_NAME@.h.jinja2       |  2 +-
 .../@NEURON_NAME@_kernel.h.jinja2             |  4 +-
 .../nest_gpu_code_generator_test.py           | 12 +++++
 5 files changed, 45 insertions(+), 25 deletions(-)

diff --git a/pynestml/codegeneration/printers/nest_gpu_variable_printer.py b/pynestml/codegeneration/printers/nest_gpu_variable_printer.py
index 1e442b332..e27953a65 100644
--- a/pynestml/codegeneration/printers/nest_gpu_variable_printer.py
+++ b/pynestml/codegeneration/printers/nest_gpu_variable_printer.py
@@ -63,7 +63,7 @@ def print_variable(self, variable: ASTVariable) -> str:
             return "((post_neuron_t*)(__target))->get_" + _name + "(_tr_t)"
 
         if variable.get_name() == PredefinedVariables.E_CONSTANT:
-            return "numerics::e"
+            return "M_E"
 
         symbol = variable.get_scope().resolve_to_symbol(variable.get_complete_name(), SymbolKind.VARIABLE)
 
diff --git a/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@.cu.jinja2 b/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@.cu.jinja2
index 2e7feb2ae..58168dd0d 100644
--- a/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@.cu.jinja2
+++ b/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@.cu.jinja2
@@ -24,11 +24,15 @@
 #include <cmath>
 #include <iostream>
 #include "{{ neuronName }}.h"
+{%- if not uses_numeric_solver %}
+#include "{{ neuronName }}_kernel.h"
+#include "rk5.h"
+{%- endif %}
 #include "spike_buffer.h"
 
 {%- import 'directives/SetScalParamAndVar.jinja2' as set_scal_param_var with context %}
 
-{%- if uses_analytic_solver %}
+{%- if not uses_numeric_solver %}
 using namespace {{ neuronName }}_ns;
 
 __global__ void {{ neuronName }}_Calibrate(int n_node, float *param_arr,
@@ -74,13 +78,14 @@ __global__ void {{ neuronName }}_Update(int n_node, int i_node_0, float *var_arr
 {%- endif %}
   }
 }
-{%- endif %}
+
 
 {{ neuronName }}::~{{ neuronName }}()
 {
   FreeVarArr();
   FreeParamArr();
 }
+{%- endif %}
 
 {%- if uses_numeric_solver %}
 namespace {{neuronName}}_ns
@@ -97,8 +102,10 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param,
 
   // Internal variables
 {%- for variable_symbol in neuron.get_internal_symbols() %}
+{%- if variable_symbol.get_symbol_name() != "__h" %}
 {%- set variable = utils.get_internal_variable_by_name(astnode, variable_symbol.get_symbol_name()) %}
 {%- include "directives/MemberInitialization.jinja2" %}
+{%- endif %}
 {%- endfor %}
 
   // State variables
@@ -112,12 +119,14 @@ __device__ void NodeCalibrate(int n_var, int n_param, double x, float *y,
                float *param, {{neuronName}}_rk5 data_struct)
 {
-  refractory_step = 0;
+  // refractory_step = 0;
 {%- filter indent(4,True) %}
 {%- for internals_block in neuron.get_internals_blocks() %}
 {%- for decl in internals_block.get_declarations() %}
 {%- for variable in decl.get_variables() %}
+{%- if variable.get_name() != "h" %}
 {%- include "directives/MemberInitialization.jinja2" %}
+{%- endif %}
 {%- endfor %}
 {%- endfor %}
 {%- endfor %}
 {%- endfilter %}
@@ -140,7 +149,6 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y,
 {
   {{neuronName}}_ns::NodeCalibrate(n_var, n_param, x, y, param, data_struct);
 }
-using namespace aeif_cond_alpha_ns;
 {%- endif %}
 
 int {{ neuronName }}::Init(int i_node_0, int n_node, int /*n_port*/,
@@ -165,7 +173,7 @@ int {{ neuronName }}::Init(int i_node_0, int n_node, int /*n_port*/,
   scal_param_name_ = {{ neuronName }}_scal_param_name;
 
 {%- if uses_numeric_solver %}
-  group_param_name_ = aeif_cond_alpha_group_param_name;
+  group_param_name_ = {{neuronName}}_group_param_name;
   rk5_data_struct_.i_node_0_ = i_node_0_;
 
   SetGroupParam("h_min_rel", 1.0e-3);
@@ -219,18 +227,6 @@ int {{ neuronName }}::Init(int i_node_0, int n_node, int /*n_port*/,
   return 0;
 }
 
-int {{ neuronName }}::Update(long long it, double t1)
-{
-{%- if uses_analytic_solver %}
-  {{ neuronName }}_Update<<<(n_node_+1023)/1024, 1024>>>
-    (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_);
-  // gpuErrchk( cudaDeviceSynchronize() );
-{%- else %}
-  rk5_.Update(t1, h_min_, rk5_data_struct_);
-{%- endif %}
-  return 0;
-}
-
 int {{ neuronName }}::Free()
 {
   FreeVarArr();
@@ -241,13 +237,25 @@ int {{ neuronName }}::Free()
 
 int {{ neuronName }}::Calibrate(double time_min, float time_resolution)
 {
-{%- if uses_analytic_solver %}
-  {{ neuronName }}_Calibrate<<<(n_node_+1023)/1024, 1024>>>
-    (n_node_, param_arr_, n_param_, time_resolution);
-{%- else %}
+{%- if uses_numeric_solver %}
   h_min_ = h_min_rel_* time_resolution;
   h_ = h0_rel_* time_resolution;
   rk5_.Calibrate(time_min, h_, rk5_data_struct_);
+{%- else %}
+  {{ neuronName }}_Calibrate<<<(n_node_+1023)/1024, 1024>>>
+    (n_node_, param_arr_, n_param_, time_resolution);
 {%- endif %}
   return 0;
 }
+
+int {{ neuronName }}::Update(long long it, double t1)
+{
+{%- if uses_numeric_solver %}
+  rk5_.Update(t1, h_min_, rk5_data_struct_);
+{%- else %}
+  {{ neuronName }}_Update<<<(n_node_+1023)/1024, 1024>>>
+    (n_node_, i_node_0_, var_arr_, param_arr_, n_var_, n_param_);
+  // gpuErrchk( cudaDeviceSynchronize() );
+{%- endif %}
+  return 0;
+}
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@.h.jinja2 b/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@.h.jinja2
index 9cfafd492..8c46a4145 100644
--- a/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@.h.jinja2
+++ b/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@.h.jinja2
@@ -136,7 +136,7 @@ const std::string {{ neuronName }}_group_param_name[N_GROUP_PARAM] = {
 #}
 
 {%- if uses_numeric_solver %}
-{%- for variable_symbol in utils.adjusted_state_symbols() %}
+{%- for variable_symbol in utils.adjusted_state_symbols(neuron) %}
 {%- set variable = utils.get_state_variable_by_name(astnode, variable_symbol.get_symbol_name()) %}
 #define {{ printer_no_origin.print(variable) }} y[i_{{ printer_no_origin.print(variable) }}]
 {%- endfor %}
diff --git a/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@_kernel.h.jinja2 b/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@_kernel.h.jinja2
index 2ac490373..1f63150b3 100644
--- a/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@_kernel.h.jinja2
+++ b/pynestml/codegeneration/resources_nest_gpu/point_neuron/@NEURON_NAME@_kernel.h.jinja2
@@ -24,8 +24,8 @@
 
 
 
-#ifndef {{ neuronName.upper() }}_KERNEL)_H
-#define {{ neuronName.upper() }}_KERNEL)_H
+#ifndef {{ neuronName.upper() }}_KERNEL_H
+#define {{ neuronName.upper() }}_KERNEL_H
 
 #include <string>
 #include <cmath>
diff --git a/tests/nest_gpu_tests/nest_gpu_code_generator_test.py b/tests/nest_gpu_tests/nest_gpu_code_generator_test.py
index cbbe592a4..bfe8be2d8 100644
--- a/tests/nest_gpu_tests/nest_gpu_code_generator_test.py
+++ b/tests/nest_gpu_tests/nest_gpu_code_generator_test.py
@@ -36,3 +36,15 @@ def test_nest_gpu_code_generator(self):
         generate_nest_gpu_target(input_path, target_path,
                                  logging_level=logging_level,
                                  suffix=suffix)
+
+    def test_nest_gpu_code_generator_numeric(self):
+        input_path = os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.join(
+            os.pardir, os.pardir, "models", "neurons", "aeif_cond_alpha.nestml"))))
+        target_path = "target_gpu_numeric"
+        logging_level = "INFO"
+        suffix = "_nestml"
+        codegen_opts = {"solver": "numeric"}
+        generate_nest_gpu_target(input_path, target_path,
+                                 logging_level=logging_level,
+                                 suffix=suffix,
+                                 codegen_opts=codegen_opts)