Skip to content

Commit

Permalink
Tests: Activity driven model with two agents
Browse files Browse the repository at this point in the history
Co-authored-by: Amrita Goswami <[email protected]>
  • Loading branch information
MSallermann and amritagos committed Mar 12, 2024
1 parent fa088f7 commit 0d9913c
Show file tree
Hide file tree
Showing 4 changed files with 44 additions and 11 deletions.
11 changes: 9 additions & 2 deletions include/model_base.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,15 @@ class ModelBase
public:
int n_iterations = 0;
virtual AgentBase * get_agent( int idx ) = 0; // Use this to get an abstract representation of the agent at idx
virtual void iteration() = 0;
virtual bool finished() = 0;

template<typename AgentT>
AgentT * get_agent_as( int idx )
{
return static_cast<AgentT *>( get_agent( idx ) );
}

virtual void iteration() = 0;
virtual bool finished() = 0;
virtual void agents_from_file( const std::string & file ) = 0;
virtual ~ModelBase() = default;
};
Expand Down
1 change: 1 addition & 0 deletions include/models/ActivityDrivenModel.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ inline void Agent<ActivityAgentData>::from_string( const std::string & str )

class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
{
public:
using AgentT = Agent<ActivityAgentData>;

private:
Expand Down
18 changes: 9 additions & 9 deletions test/res/2_agents_activity_prob.toml
Original file line number Diff line number Diff line change
@@ -1,24 +1,24 @@
[simulation]
model = "ActivityDriven"
rng_seed = 120 # Leaving this empty will pick a random seed
rng_seed = 120 # Leaving this empty will pick a random seed

[io]
n_output_network = 1 # Write the network every 20 iterations
n_output_network = 1 # Write the network every iteration
n_output_agents = 1
print_progress = true # Print the iteration time ; if not set, then always print

[model]
max_iterations = 2 # If not set, max iterations is infinite
max_iterations = 10000 # If not set, max iterations is infinite

[ActivityDriven]
dt = 0.01 # Timestep for the integration of the coupled ODEs
m = 1 # Number of agents contacted, when the agent is active
eps = 1 # Minimum activity epsilon; a_i belongs to [epsilon,1]
dt = 0.005 # Timestep for the integration of the coupled ODEs
m = 1 # Number of agents contacted, when the agent is active
eps = 1 # Minimum activity epsilon; a_i belongs to [epsilon,1]
gamma = 2.1 # Exponent of activity power law distribution of activities
reciprocity = 1 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections
reciprocity = 1 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections
homophily = 0.5 # aka beta. if zero, agents pick their interaction partners at random
alpha = 3.0 # Controversialness of the issue, must be greater than 0.
K = 3.0 # Social interaction strength
alpha = 1.01 # Controversialness of the issue, must be greater than 0.
K = 2.0 # Social interaction strength
mean_activities = false # Use the mean value of the powerlaw distribution for the activities of all agents
mean_weights = false # Use the meanfield approximation of the network edges

Expand Down
25 changes: 25 additions & 0 deletions test/test_activity.cpp
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
#include "models/ActivityDrivenModel.hpp"
#include <catch2/catch_test_macros.hpp>
#include <catch2/matchers/catch_matchers_floating_point.hpp>

Expand Down Expand Up @@ -27,7 +28,9 @@ TEST_CASE(
fs::path output_dir_path = proj_root_path / fs::path( "test/output" );

// Create the output directory; this is done by main
fs::remove_all( output_dir_path );
fs::create_directories( output_dir_path );

// This should be empty
REQUIRE( fs::is_empty( output_dir_path ) );

Expand All @@ -54,11 +57,33 @@ TEST_CASE( "Test the probabilistic activity driven model for two agents", "[acti

// We need an output path for Simulation, but we won't write anything out there?
fs::path output_dir_path = proj_root_path / fs::path( "test/output" );

fs::remove_all( output_dir_path );
fs::create_directories( output_dir_path );

// Zero step
auto filename = fmt::format( "opinions_{}.txt", 0 );
Seldon::IO::opinions_to_file( simulation, ( output_dir_path / fs::path( filename ) ).string() );

simulation.run( output_dir_path );

auto model_settings = std::get<Seldon::Config::ActivityDrivenSettings>( options.model_settings );
auto K = model_settings.K;
auto alpha = model_settings.alpha;

// Check that the parameters match our assumptions for the numerical solution
REQUIRE_THAT( K, WithinAbs( 2.0, 1e-16 ) );
REQUIRE_THAT( alpha, WithinAbs( 1.01, 1e-16 ) );

// This is the solution of x = K tanh(alpha x)
double analytical_x = 1.9187384098662013;

fmt::print( "analytical_x = {}\n", analytical_x );

for( size_t idx_agent = 0; idx_agent < simulation.network->n_agents(); idx_agent++ )
{
auto * agent = simulation.model->get_agent_as<ActivityAgentModel::AgentT>( idx_agent );
fmt::print( "{} \n", agent->data.opinion );
REQUIRE_THAT( agent->data.opinion, WithinAbs( analytical_x, 1e-4 ) );
}
}

0 comments on commit 0d9913c

Please sign in to comment.