
Commit

Runge-Kutta
amritagos authored and Moritz Sallermann committed Oct 15, 2023
1 parent f5939ea commit 1bb17ea
Showing 5 changed files with 72 additions and 22 deletions.
3 changes: 3 additions & 0 deletions include/models/ActivityDrivenModel.hpp
@@ -45,6 +45,9 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
std::mt19937 & gen; // reference to simulation Mersenne-Twister engine
std::set<std::pair<size_t, size_t>> reciprocal_edge_buffer{};

void get_euler_slopes( std::vector<double> & k_previous, double factor, std::vector<double> & k_buffer );
void get_euler_slopes( std::vector<double> & k_buffer );

public:
// Model-specific parameters
double dt = 0.01; // Timestep for the integration of the coupled ODEs
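
The two overloads declared above split one Runge-Kutta step into slope evaluations: the single-argument version fills k_buffer with the slopes at the current opinions (k_1), while the three-argument version evaluates the slopes at opinions shifted by factor * k_previous (k_2, k_3 and k_4). As a minimal standalone sketch of how such slope evaluations compose into one classic RK4 step, here is the same idea for a scalar ODE dy/dt = f(y); this is illustrative only and not repository code (rk4_step and f are made-up names):

#include <cstdio>
#include <functional>

// Illustrative sketch only (not part of this commit): one classic RK4 step for an
// autonomous scalar ODE dy/dt = f(y), mirroring the k-buffer structure of get_euler_slopes().
double rk4_step( const std::function<double( double )> & f, double y, double dt )
{
    const double k1 = dt * f( y );            // k_1 = h f(y_n)
    const double k2 = dt * f( y + 0.5 * k1 ); // k_2 = h f(y_n + k_1/2)
    const double k3 = dt * f( y + 0.5 * k2 ); // k_3 = h f(y_n + k_2/2)
    const double k4 = dt * f( y + k3 );       // k_4 = h f(y_n + k_3)
    return y + ( k1 + 2.0 * k2 + 2.0 * k3 + k4 ) / 6.0;
}

int main()
{
    double y = 1.0; // initial value, e.g. an opinion
    for( int n = 0; n < 100; n++ )
        y = rk4_step( []( double v ) { return -v; }, y, 0.01 ); // dy/dt = -y
    std::printf( "%.4f\n", y ); // prints roughly 0.3679, i.e. exp(-1)
}

In the model itself the state is the whole opinion vector, so the four k's are stored in the buffers allocated in iteration() below rather than in scalars.
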
2 changes: 1 addition & 1 deletion include/network.hpp
@@ -25,7 +25,7 @@ class Network
std::size_t agent_idx, const std::vector<size_t> & buffer_neighbours,
const std::vector<WeightT> & buffer_weights );

void push_back_neighbour_and_weight(size_t i, size_t j, WeightT w);
void push_back_neighbour_and_weight( size_t i, size_t j, WeightT w );

void transpose();

2 changes: 1 addition & 1 deletion src/main.cpp
@@ -42,7 +42,7 @@ int main( int argc, char * argv[] )

auto simulation = Seldon::Simulation( config_file_path.string(), network_file, agent_file );

fmt::print( "{}", simulation.model->get_agent(0)->to_string() );
fmt::print( "{}", simulation.model->get_agent( 0 )->to_string() );

Seldon::IO::network_to_dot_file( *simulation.network, ( output_dir_path / fs::path( "network.dot" ) ).string() );
Seldon::IO::network_to_file( simulation, ( output_dir_path / fs::path( "network.txt" ) ).string() );
77 changes: 63 additions & 14 deletions src/models/ActivityDrivenModel.cpp
@@ -46,7 +46,8 @@ void Seldon::ActivityAgentModel::iteration()

// Implement the weight for the probability of agent `idx_agent` contacting agent `j`
// Not normalised since this is taken care of by the reservoir sampling
auto weight_callback = [idx_agent, this]( size_t j ) {
auto weight_callback = [idx_agent, this]( size_t j )
{
if( idx_agent == j ) // The agent does not contact itself
return 0.0;
return std::pow(
@@ -92,30 +93,78 @@ void Seldon::ActivityAgentModel::iteration()

network.transpose(); // transpose the network, so that we have incoming edges

// Integrate the ODE
// Integrate the ODE using 4th order Runge-Kutta
auto k1_buffer = std::vector<double>();
auto k2_buffer = std::vector<double>();
auto k3_buffer = std::vector<double>();
auto k4_buffer = std::vector<double>();

// k_1 = hf(x_n,y_n)
get_euler_slopes( k1_buffer );

// k_2 = hf(x_n+1/2h,y_n+1/2k_1)
get_euler_slopes( k1_buffer, 0.5, k2_buffer );

// k_3 = hf(x_n+1/2h,y_n+1/2k_2)
get_euler_slopes( k2_buffer, 0.5, k3_buffer );

// k_4 = hf(x_n+h,y_n+k_3)
get_euler_slopes( k3_buffer, 1.0, k4_buffer );

// Update the agent opinions
for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent )
{
// y_(n+1) = y_n+1/6k_1+1/3k_2+1/3k_3+1/6k_4+O(h^5)
agents[idx_agent].data.opinion
+= ( k1_buffer[idx_agent] + 2 * k2_buffer[idx_agent] + 2 * k3_buffer[idx_agent] + k4_buffer[idx_agent] )
/ 6.0;
}
}

void Seldon::ActivityAgentModel::get_euler_slopes( std::vector<double> & k_buffer )
{
// k_1 = hf(x_n,y_n)
// h is the timestep
auto neighbour_buffer = std::vector<size_t>();
size_t j_index = 0;

agents_current_copy = agents; // Set the copy to the current state of agents. TODO: this is somewhat wasteful since
// activities are not changing
k_buffer.resize( network.n_agents() );

for( size_t i = 0; i < network.n_agents(); i++ )
for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent )
{
network.get_neighbours( i, neighbour_buffer ); // Get the incoming neighbours
network.get_neighbours( idx_agent, neighbour_buffer ); // Get the incoming neighbours
k_buffer[idx_agent] = -agents[idx_agent].data.opinion;
// Loop through neighbouring agents
for( size_t j = 0; j < neighbour_buffer.size(); j++ )
{
// TODO: currently this uses an euler integration -> use RK4
j_index = neighbour_buffer[j];
agents_current_copy[i].data.opinion
+= dt * ( -agents[i].data.opinion + K * ( std::tanh( alpha * agents[j_index].data.opinion ) ) );
k_buffer[idx_agent] += K * ( std::tanh( alpha * agents[j_index].data.opinion ) );
}
// Multiply by the timestep
k_buffer[idx_agent] *= dt;
}
}

void Seldon::ActivityAgentModel::get_euler_slopes(
std::vector<double> & k_previous, double factor, std::vector<double> & k_buffer )
{
// h is the timestep
auto neighbour_buffer = std::vector<size_t>();
size_t j_index = 0;

// Update the agents from the copy
for( std::size_t i = 0; i < agents.size(); i++ )
k_buffer.resize( network.n_agents() );

for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent )
{
max_opinion_diff
= std::max( max_opinion_diff, std::abs( agents[i].data.opinion - agents_current_copy[i].data.opinion ) );
agents[i] = agents_current_copy[i];
network.get_neighbours( idx_agent, neighbour_buffer ); // Get the incoming neighbours
k_buffer[idx_agent] = -( agents[idx_agent].data.opinion + factor * k_previous[idx_agent] );
// Loop through neighbouring agents
for( size_t j = 0; j < neighbour_buffer.size(); j++ )
{
j_index = neighbour_buffer[j];
k_buffer[idx_agent] += K * ( std::tanh( alpha * ( agents[j_index].data.opinion + factor * k_previous[j_index] ) ) );
}
// Multiply by the timestep
k_buffer[idx_agent] *= dt;
}
}
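
Written out as equations (consistent with the in-code comments, with h = dt and f_i the right-hand side assembled by the slope loops above, i.e. an unweighted sum over the incoming neighbours obtained after network.transpose()):

\frac{\mathrm{d}y_i}{\mathrm{d}t} = f_i(y) = -y_i + K \sum_{j \in N_i^{\mathrm{in}}} \tanh( \alpha \, y_j )

k_1 = h\,f(y_n), \quad k_2 = h\,f\big( y_n + \tfrac{1}{2} k_1 \big), \quad k_3 = h\,f\big( y_n + \tfrac{1}{2} k_2 \big), \quad k_4 = h\,f\big( y_n + k_3 \big)

y_{n+1} = y_n + \tfrac{1}{6} k_1 + \tfrac{1}{3} k_2 + \tfrac{1}{3} k_3 + \tfrac{1}{6} k_4 + O( h^5 )

The factor argument of 0.5 or 1.0 in the three-argument overload is exactly the shift of the opinions by half or a full slope before f is re-evaluated for k_2, k_3 and k_4.
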
10 changes: 4 additions & 6 deletions src/network.cpp
@@ -48,12 +48,10 @@ void Seldon::Network::set_neighbours_and_weights(
std::size_t agent_idx, const std::vector<size_t> & buffer_neighbours,
const std::vector<Seldon::Network::WeightT> & buffer_weights )
{
if( buffer_neighbours.size() != buffer_weights.size() )
[[unlikely]]
{
throw std::runtime_error(
"Network::set_neighbours_and_weights: both buffers need to have the same length!" );
}
if( buffer_neighbours.size() != buffer_weights.size() ) [[unlikely]]
{
throw std::runtime_error( "Network::set_neighbours_and_weights: both buffers need to have the same length!" );
}

neighbour_list[agent_idx] = buffer_neighbours;
weight_list[agent_idx] = buffer_weights;
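
The behaviour here is unchanged; the commit only folds the [[unlikely]] attribute and the error message onto single lines. As a standalone reminder of the C++20 attribute (illustrative sketch, not repository code; check_same_length is a made-up name), it marks the throwing branch as cold so the compiler can favour the matching-size path:

#include <cstddef>
#include <stdexcept>
#include <vector>

// Illustrative only: [[unlikely]] (C++20) hints that the size-mismatch branch is rarely taken.
void check_same_length( const std::vector<std::size_t> & neighbours, const std::vector<double> & weights )
{
    if( neighbours.size() != weights.size() ) [[unlikely]]
    {
        throw std::runtime_error( "check_same_length: both buffers need to have the same length!" );
    }
}
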
