From ce74473eb06f23785117c91b1ef6e77090fc374c Mon Sep 17 00:00:00 2001 From: Amrita Goswami Date: Sat, 23 Mar 2024 15:54:11 +0000 Subject: [PATCH] ActivityDrivenModel: Changed get_euler_slopes We changed the definition of get_euler_slopes. Now the timestep is multiplied in the update rule. This enables us to re-use get_euler_slopes in the InertialModel. Co-authored-by: Moritz Sallermann --- include/models/ActivityDrivenModel.hpp | 6 +++--- src/models/ActivityDrivenModel.cpp | 10 ++++++---- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp index 5cd4401..237bbe9 100644 --- a/include/models/ActivityDrivenModel.hpp +++ b/include/models/ActivityDrivenModel.hpp @@ -71,6 +71,7 @@ class ActivityDrivenModelAbstract : public Model std::mt19937 & gen; // reference to simulation Mersenne-Twister engine std::set> reciprocal_edge_buffer{}; +protected: // Model-specific parameters double dt{}; // Timestep for the integration of the coupled ODEs // Various free parameters @@ -93,7 +94,6 @@ class ActivityDrivenModelAbstract : public Model double reluctance_eps{}; double covariance_factor{}; -protected: size_t n_bots = 0; // The first n_bots agents are bots std::vector bot_m = std::vector( 0 ); std::vector bot_activity = std::vector( 0 ); @@ -343,8 +343,8 @@ class ActivityDrivenModelAbstract : public Model k_buffer[idx_agent] += 1.0 / network.agents[idx_agent].data.reluctance * K * weight_buffer[j] * std::tanh( alpha * opinion( j_index ) ); } - // Multiply by the timestep - k_buffer[idx_agent] *= dt; + // Here, we won't multiply by the timestep.
+ // Instead, the timestep is multiplied in the update rule } } }; diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index bdfc7c4..0b0dd9c 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -20,19 +20,21 @@ void ActivityDrivenModelAbstract::iteration() get_euler_slopes( k1_buffer, [this]( size_t i ) { return network.agents[i].data.opinion; } ); // k_2 = hf(x_n+1/2h,y_n+1/2k_1) get_euler_slopes( - k2_buffer, [this]( size_t i ) { return network.agents[i].data.opinion + 0.5 * this->k1_buffer[i]; } ); + k2_buffer, [this]( size_t i ) { return network.agents[i].data.opinion + 0.5 * dt * this->k1_buffer[i]; } ); // k_3 = hf(x_n+1/2h,y_n+1/2k_2) get_euler_slopes( - k3_buffer, [this]( size_t i ) { return network.agents[i].data.opinion + 0.5 * this->k2_buffer[i]; } ); + k3_buffer, [this]( size_t i ) { return network.agents[i].data.opinion + 0.5 * dt * this->k2_buffer[i]; } ); // k_4 = hf(x_n+h,y_n+k_3) - get_euler_slopes( k4_buffer, [this]( size_t i ) { return network.agents[i].data.opinion + this->k3_buffer[i]; } ); + get_euler_slopes( + k4_buffer, [this]( size_t i ) { return network.agents[i].data.opinion + dt * this->k3_buffer[i]; } ); // Update the agent opinions for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent ) { // y_(n+1) = y_n+1/6k_1+1/3k_2+1/3k_3+1/6k_4+O(h^5) network.agents[idx_agent].data.opinion - += ( k1_buffer[idx_agent] + 2 * k2_buffer[idx_agent] + 2 * k3_buffer[idx_agent] + k4_buffer[idx_agent] ) + += dt + * ( k1_buffer[idx_agent] + 2 * k2_buffer[idx_agent] + 2 * k3_buffer[idx_agent] + k4_buffer[idx_agent] ) / 6.0; }