From 9ebb9ed36e1f45c676332f397379319a724b728a Mon Sep 17 00:00:00 2001 From: amritagos Date: Mon, 9 Oct 2023 22:51:48 +0000 Subject: [PATCH 01/32] Activity driven model: Init and started bare bones outline of what the class should look like --- include/agent.hpp | 14 ++++--- include/models/ActivityDrivenModel.hpp | 51 ++++++++++++++++++++++++++ include/util/io.hpp | 1 - meson.build | 1 + src/models/ActivityDrivenModel.cpp | 23 ++++++++++++ src/models/DeGroot.cpp | 8 ++-- src/simulation.cpp | 2 +- test/test_deGroot.cpp | 8 ++-- 8 files changed, 92 insertions(+), 16 deletions(-) create mode 100644 include/models/ActivityDrivenModel.hpp create mode 100644 src/models/ActivityDrivenModel.cpp diff --git a/include/agent.hpp b/include/agent.hpp index 215dec7..9ab0fd1 100644 --- a/include/agent.hpp +++ b/include/agent.hpp @@ -4,28 +4,30 @@ namespace Seldon { -/* Agent is a base class for actual agents with opinion type T, it needs to implement to_string and from_string*/ +/* Agent is a base class for actual agents with data type T +(which contains an opinion and perhaps some other things), +it needs to implement to_string and from_string*/ template class Agent : public AgentBase { public: - using opinion_t = T; - opinion_t opinion; + using data_t = T; + data_t data; Agent() = default; - Agent( opinion_t opinion ) : opinion( opinion ) {} + Agent( data_t data ) : data( data ) {} void from_string( const std::string & str ); std::string to_string() const override { - return fmt::format( "{}", opinion ); + return fmt::format( "{}", data ); } }; template<> inline void Agent::from_string( const std::string & str ) { - opinion = std::stod( str ); + data = std::stod( str ); } } // namespace Seldon \ No newline at end of file diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp new file mode 100644 index 0000000..67893b2 --- /dev/null +++ b/include/models/ActivityDrivenModel.hpp @@ -0,0 +1,51 @@ +#pragma once +#include "model.hpp" +#include 
"network.hpp" +#include + +namespace Seldon +{ + +struct ActivityAgentType +{ + double opinion = 0; // x_i + double activity = 0; // a_i +}; + +template<> +std::string Agent::to_string() const +{ + return fmt::format( "{}, {}", data.opinion, data.activity ); +}; + +class ActivityAgentModel : public Model> +{ + +private: + double max_opinion_diff = 0; + Network & network; + std::vector agents_current_copy; + // Model-specific parameters + double dt = 0.1; // Timestep for the integration of the coupled ODEs + // Various free parameters + int m = 10; // Number of agents contacted, when the agent is active + double eps = 0.01; // Minimum activity epsilon; a_i belongs to [epsilon,1] + double gamma = 2.1; // Exponent of activity power law distribution of activities + double alpha; // Controversialness of the issue, must be greater than 0. + double homophily = 0; // aka beta. if zero, agents pick their interaction partners at random + // Reciprocity aka r. probability that when agent i contacts j via weighted reservoir sampling + // j also sends feedback to i. 
So every agent can have more than m incoming connections + double reciprocity = 0.5; + double K; // Social interaction strength; K>0 + +public: + double convergence_tol = 1e-12; + + ActivityAgentModel( int n_agents, Network & network ); + + // void iteration() override; + // bool finished() overteration() override; + // bool finished() override; +}; + +} // namespace Seldon \ No newline at end of file diff --git a/include/util/io.hpp b/include/util/io.hpp index 791f47f..6b02d91 100644 --- a/include/util/io.hpp +++ b/include/util/io.hpp @@ -12,7 +12,6 @@ namespace Seldon namespace IO { - inline void network_to_dot_file( const Network & network, const std::string & file_path ) { std::fstream fs; diff --git a/meson.build b/meson.build index ff6d5c2..36d17a8 100644 --- a/meson.build +++ b/meson.build @@ -10,6 +10,7 @@ sources_seldon = [ 'src/network_generation.cpp', 'src/model.cpp', 'src/models/DeGroot.cpp', + 'src/models/ActivityDrivenModel.cpp', 'src/util/tomlplusplus.cpp' ] diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp new file mode 100644 index 0000000..a13d3c4 --- /dev/null +++ b/src/models/ActivityDrivenModel.cpp @@ -0,0 +1,23 @@ +#include "models/ActivityDrivenModel.hpp" +#include + +Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network ) + : Model( n_agents ), network( network ), agents_current_copy( std::vector( n_agents ) ) +{ + // TODO: take this from simulation + int rng_seed = std::random_device()(); + auto gen = std::mt19937( rng_seed ); // TODO + std::uniform_int_distribution<> dis( -1, 1 ); // Opinion initial values + std::uniform_int_distribution<> dist_activity( 0, 1 ); // Uniform random variable for activities + + // Initial conditions for the opinions, initialize to [-1,1] + // The activities should be drawn from a power law distribution + for( size_t i = 0; i < agents.size(); i++ ) + { + agents[i].data.opinion = dis( gen ); // Draw the opinion value + // Draw from a power law 
distribution (1-gamma)/(1-eps^(1-gamma)) * a^(-gamma) + } + + +} + diff --git a/src/models/DeGroot.cpp b/src/models/DeGroot.cpp index 153d6e9..d8954b2 100644 --- a/src/models/DeGroot.cpp +++ b/src/models/DeGroot.cpp @@ -7,7 +7,7 @@ Seldon::DeGrootModel::DeGrootModel( int n_agents, Network & network ) { for( size_t i = 0; i < agents.size(); i++ ) { - agents[i].opinion = double( i ) / double( agents.size() ); + agents[i].data = double( i ) / double( agents.size() ); } } @@ -24,12 +24,12 @@ void Seldon::DeGrootModel::iteration() { network.get_neighbours( i, neighbour_buffer ); network.get_weights( i, weight_buffer ); - agents_current_copy[i].opinion = 0.0; + agents_current_copy[i].data = 0.0; for( size_t j = 0; j < neighbour_buffer.size(); j++ ) { j_index = neighbour_buffer[j]; weight = weight_buffer[j]; - agents_current_copy[i].opinion += weight * agents[j_index].opinion; + agents_current_copy[i].data += weight * agents[j_index].data; } } @@ -37,7 +37,7 @@ void Seldon::DeGrootModel::iteration() // Update the original agent opinions for( std::size_t i = 0; i < agents.size(); i++ ) { - max_opinion_diff = std::max( max_opinion_diff, std::abs( agents[i].opinion - agents_current_copy[i].opinion ) ); + max_opinion_diff = std::max( max_opinion_diff, std::abs( agents[i].data - agents_current_copy[i].data ) ); agents[i] = agents_current_copy[i]; } } diff --git a/src/simulation.cpp b/src/simulation.cpp index 93801c9..f0238f1 100644 --- a/src/simulation.cpp +++ b/src/simulation.cpp @@ -67,7 +67,7 @@ Seldon::Simulation::Simulation( } n_agents = network->n_agents(); - fmt::print("Network has {} agents\n", n_agents); + fmt::print( "Network has {} agents\n", n_agents ); // Construct the model object // Generic model parameters diff --git a/test/test_deGroot.cpp b/test/test_deGroot.cpp index 65128c0..2c7c51d 100644 --- a/test/test_deGroot.cpp +++ b/test/test_deGroot.cpp @@ -27,8 +27,8 @@ TEST_CASE( "Test the DeGroot Model Symmetric", "[DeGroot]" ) model.convergence_tol = 1e-6; 
model.max_iterations = 100; - model.agents[0].opinion = 0.0; - model.agents[1].opinion = 1.0; + model.agents[0].data = 0.0; + model.agents[1].data = 1.0; do { @@ -38,7 +38,7 @@ TEST_CASE( "Test the DeGroot Model Symmetric", "[DeGroot]" ) fmt::print( "N_iterations = {} (with convergence_tol {})\n", model.n_iterations, model.convergence_tol ); for( size_t i = 0; i < n_agents; i++ ) { - fmt::print( "Opinion {} = {}\n", i, model.agents[i].opinion ); - REQUIRE_THAT( model.agents[i].opinion, WithinAbs( 0.5, model.convergence_tol * 10.0 ) ); + fmt::print( "Opinion {} = {}\n", i, model.agents[i].data ); + REQUIRE_THAT( model.agents[i].data, WithinAbs( 0.5, model.convergence_tol * 10.0 ) ); } } \ No newline at end of file From a0387748eec02c9dea18176e7a477bdcadb816bd Mon Sep 17 00:00:00 2001 From: amritagos Date: Tue, 10 Oct 2023 13:42:28 +0000 Subject: [PATCH 02/32] Use the Mersenne-Twister engine from simulation --- include/models/ActivityDrivenModel.hpp | 23 +++++++++++++---------- src/models/ActivityDrivenModel.cpp | 10 ++++------ test/test_deGroot.cpp | 8 ++++---- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp index 67893b2..fb699fc 100644 --- a/include/models/ActivityDrivenModel.hpp +++ b/include/models/ActivityDrivenModel.hpp @@ -1,6 +1,7 @@ #pragma once #include "model.hpp" #include "network.hpp" +#include #include namespace Seldon @@ -25,23 +26,25 @@ class ActivityAgentModel : public Model> double max_opinion_diff = 0; Network & network; std::vector agents_current_copy; - // Model-specific parameters + // Random number generation + std::mt19937 & gen; // reference to simulation Mersenne-Twister engine + // Model-specific parameters double dt = 0.1; // Timestep for the integration of the coupled ODEs - // Various free parameters - int m = 10; // Number of agents contacted, when the agent is active - double eps = 0.01; // Minimum activity epsilon; a_i belongs to 
[epsilon,1] - double gamma = 2.1; // Exponent of activity power law distribution of activities - double alpha; // Controversialness of the issue, must be greater than 0. + // Various free parameters + int m = 10; // Number of agents contacted, when the agent is active + double eps = 0.01; // Minimum activity epsilon; a_i belongs to [epsilon,1] + double gamma = 2.1; // Exponent of activity power law distribution of activities + double alpha; // Controversialness of the issue, must be greater than 0. double homophily = 0; // aka beta. if zero, agents pick their interaction partners at random // Reciprocity aka r. probability that when agent i contacts j via weighted reservoir sampling - // j also sends feedback to i. So every agent can have more than m incoming connections - double reciprocity = 0.5; - double K; // Social interaction strength; K>0 + // j also sends feedback to i. So every agent can have more than m incoming connections + double reciprocity = 0.5; + double K; // Social interaction strength; K>0 public: double convergence_tol = 1e-12; - ActivityAgentModel( int n_agents, Network & network ); + ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen ); // void iteration() override; // bool finished() overteration() override; diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index a13d3c4..6d2a904 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -1,14 +1,12 @@ #include "models/ActivityDrivenModel.hpp" #include -Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network ) - : Model( n_agents ), network( network ), agents_current_copy( std::vector( n_agents ) ) +Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen ) + : Model( n_agents ), network( network ), agents_current_copy( std::vector( n_agents ) ), gen( gen ) { // TODO: take this from simulation - int rng_seed = std::random_device()(); - 
auto gen = std::mt19937( rng_seed ); // TODO - std::uniform_int_distribution<> dis( -1, 1 ); // Opinion initial values - std::uniform_int_distribution<> dist_activity( 0, 1 ); // Uniform random variable for activities + std::uniform_real_distribution<> dis( -1, 1 ); // Opinion initial values + std::uniform_real_distribution<> dist_activity( 0, 1 ); // Uniform random variable for activities // Initial conditions for the opinions, initialize to [-1,1] // The activities should be drawn from a power law distribution diff --git a/test/test_deGroot.cpp b/test/test_deGroot.cpp index 2c7c51d..309aecd 100644 --- a/test/test_deGroot.cpp +++ b/test/test_deGroot.cpp @@ -25,10 +25,10 @@ TEST_CASE( "Test the DeGroot Model Symmetric", "[DeGroot]" ) auto network = Network( std::move( neighbour_list ), std::move( weight_list ) ); auto model = DeGrootModel( n_agents, network ); - model.convergence_tol = 1e-6; - model.max_iterations = 100; - model.agents[0].data = 0.0; - model.agents[1].data = 1.0; + model.convergence_tol = 1e-6; + model.max_iterations = 100; + model.agents[0].data = 0.0; + model.agents[1].data = 1.0; do { From 8b65bee132ae77a2d7977ee6491b36315767d4ad Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Tue, 10 Oct 2023 18:40:11 +0000 Subject: [PATCH 03/32] Small fixes: - Made Agent::to_string inline - and implemented dummy version of iteration() - also started to implement initialisation of ActivityDrivenModel in simulation.cpp - formatted ActivityDrivenModel.cpp --- include/models/ActivityDrivenModel.hpp | 5 +++-- src/models/ActivityDrivenModel.cpp | 18 ++++++++++-------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp index fb699fc..1f3ca42 100644 --- a/include/models/ActivityDrivenModel.hpp +++ b/include/models/ActivityDrivenModel.hpp @@ -1,4 +1,5 @@ #pragma once + #include "model.hpp" #include "network.hpp" #include @@ -14,7 +15,7 @@ struct ActivityAgentType }; 
template<> -std::string Agent::to_string() const +inline std::string Agent::to_string() const { return fmt::format( "{}, {}", data.opinion, data.activity ); }; @@ -46,7 +47,7 @@ class ActivityAgentModel : public Model> ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen ); - // void iteration() override; + void iteration() override; // bool finished() overteration() override; // bool finished() override; }; diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index 6d2a904..1cee91c 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -2,20 +2,22 @@ #include Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen ) - : Model( n_agents ), network( network ), agents_current_copy( std::vector( n_agents ) ), gen( gen ) -{ - // TODO: take this from simulation - std::uniform_real_distribution<> dis( -1, 1 ); // Opinion initial values - std::uniform_real_distribution<> dist_activity( 0, 1 ); // Uniform random variable for activities + : Model( n_agents ), + network( network ), + agents_current_copy( std::vector( n_agents ) ), + gen( gen ) +{ + // TODO: take this from simulation + std::uniform_real_distribution<> dis( -1, 1 ); // Opinion initial values + std::uniform_real_distribution<> dist_activity( 0, 1 ); // Uniform random variable for activities // Initial conditions for the opinions, initialize to [-1,1] // The activities should be drawn from a power law distribution for( size_t i = 0; i < agents.size(); i++ ) { - agents[i].data.opinion = dis( gen ); // Draw the opinion value + agents[i].data.opinion = dis( gen ); // Draw the opinion value // Draw from a power law distribution (1-gamma)/(1-eps^(1-gamma)) * a^(-gamma) } - - } +void Seldon::ActivityAgentModel::iteration() {} \ No newline at end of file From e1e6ba7a6f36d3bc7752b784ca248847f1a52f44 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Tue, 10 Oct 2023 19:17:02 +0000 
Subject: [PATCH 04/32] ActivityDrivenModel can now be constructed in simulation.cpp --- include/models/ActivityDrivenModel.hpp | 9 ++++++--- src/main.cpp | 3 +++ src/models/ActivityDrivenModel.cpp | 8 ++++++-- src/simulation.cpp | 11 +++++++++-- 4 files changed, 24 insertions(+), 7 deletions(-) diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp index 1f3ca42..b6cace0 100644 --- a/include/models/ActivityDrivenModel.hpp +++ b/include/models/ActivityDrivenModel.hpp @@ -1,5 +1,6 @@ #pragma once +#include "agent.hpp" #include "model.hpp" #include "network.hpp" #include @@ -8,20 +9,21 @@ namespace Seldon { -struct ActivityAgentType +struct ActivityAgentData { double opinion = 0; // x_i double activity = 0; // a_i }; template<> -inline std::string Agent::to_string() const +inline std::string Agent::to_string() const { return fmt::format( "{}, {}", data.opinion, data.activity ); }; -class ActivityAgentModel : public Model> +class ActivityAgentModel : public Model> { + using AgentT = Agent; private: double max_opinion_diff = 0; @@ -31,6 +33,7 @@ class ActivityAgentModel : public Model> std::mt19937 & gen; // reference to simulation Mersenne-Twister engine // Model-specific parameters double dt = 0.1; // Timestep for the integration of the coupled ODEs + // Various free parameters int m = 10; // Number of agents contacted, when the agent is active double eps = 0.01; // Minimum activity epsilon; a_i belongs to [epsilon,1] diff --git a/src/main.cpp b/src/main.cpp index 89ac1d8..bd443ee 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -41,6 +41,9 @@ int main( int argc, char * argv[] ) fs::create_directories( output_dir_path ); // Create the output directory auto simulation = Seldon::Simulation( config_file_path.string(), network_file, agent_file ); + + fmt::print( "{}", simulation.model->get_agent(0)->to_string() ); + Seldon::IO::network_to_dot_file( *simulation.network, ( output_dir_path / fs::path( "network.dot" ) ).string() ); 
Seldon::IO::network_to_file( simulation, ( output_dir_path / fs::path( "network.txt" ) ).string() ); auto filename = fmt::format( "opinions_{}.txt", 0 ); diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index 1cee91c..840c110 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -1,8 +1,9 @@ #include "models/ActivityDrivenModel.hpp" #include +#include Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen ) - : Model( n_agents ), + : Model( n_agents ), network( network ), agents_current_copy( std::vector( n_agents ) ), gen( gen ) @@ -20,4 +21,7 @@ Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, } } -void Seldon::ActivityAgentModel::iteration() {} \ No newline at end of file +void Seldon::ActivityAgentModel::iteration() +{ + throw std::runtime_error( "ActivityAgentModel::iteration():: Not implemented!" ); +} \ No newline at end of file diff --git a/src/simulation.cpp b/src/simulation.cpp index f0238f1..ca320a8 100644 --- a/src/simulation.cpp +++ b/src/simulation.cpp @@ -1,4 +1,5 @@ #include "simulation.hpp" +#include "models/ActivityDrivenModel.hpp" #include "models/DeGroot.hpp" #include "network_generation.hpp" #include "util/tomlplusplus.hpp" @@ -17,7 +18,7 @@ enum class ModelType : unsigned int Seldon::Simulation::Simulation( std::string config_file, std::optional cli_network_file, std::optional cli_agent_file ) { - std::set allowed_models = { "DeGroot" }; + std::set allowed_models = { "DeGroot", "ActivityDriven" }; toml::table tbl; tbl = toml::parse_file( config_file ); @@ -48,6 +49,8 @@ Seldon::Simulation::Simulation( throw std::runtime_error( fmt::format( "Unknown model type: '{}'!", model_string ) ); } + fmt::print( "Model type: {}\n", model_string ); + // Construct the network std::optional file = cli_network_file; if( !file.has_value() ) // Check if toml file should be superceded by cli_network_file @@ -89,8 
+92,12 @@ Seldon::Simulation::Simulation( fmt::print( "Reading agents from file {}\n", cli_agent_file.value() ); model_DeGroot->Agents_from_File( cli_agent_file.value() ); } - model = std::move( model_DeGroot ); model_type = ModelType::DeGroot; } + else if( model_string == "ActivityDriven" ) + { + auto model_activityDriven = std::make_unique( n_agents, *network, gen ); + model = std::move( model_activityDriven ); + } } \ No newline at end of file From 1069111024e2f9291b576387e978c7467b5d6761 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Tue, 10 Oct 2023 19:23:23 +0000 Subject: [PATCH 05/32] removed unnecessary enum ModelType from simulation.cpp --- src/simulation.cpp | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/simulation.cpp b/src/simulation.cpp index ca320a8..8adaeeb 100644 --- a/src/simulation.cpp +++ b/src/simulation.cpp @@ -10,11 +10,6 @@ #include #include -enum class ModelType : unsigned int -{ - DeGroot -}; - Seldon::Simulation::Simulation( std::string config_file, std::optional cli_network_file, std::optional cli_agent_file ) { @@ -76,7 +71,6 @@ Seldon::Simulation::Simulation( // Generic model parameters std::optional max_iterations = tbl["model"]["max_iterations"].value(); - ModelType model_type; if( model_string == "DeGroot" ) { // DeGroot specific parameters @@ -92,8 +86,7 @@ Seldon::Simulation::Simulation( fmt::print( "Reading agents from file {}\n", cli_agent_file.value() ); model_DeGroot->Agents_from_File( cli_agent_file.value() ); } - model = std::move( model_DeGroot ); - model_type = ModelType::DeGroot; + model = std::move( model_DeGroot ); } else if( model_string == "ActivityDriven" ) { From b6b548708b16e5940babc3e1e187ce7e840c29d2 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Tue, 10 Oct 2023 19:50:30 +0000 Subject: [PATCH 06/32] put agents_from_file declaration in model base and fixed construction of activity driven model further --- include/model.hpp | 15 ++++++--------- include/model_base.hpp | 
3 ++- src/simulation.cpp | 11 +++++++++-- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/include/model.hpp b/include/model.hpp index 5b44e5e..4a8967f 100644 --- a/include/model.hpp +++ b/include/model.hpp @@ -19,7 +19,7 @@ class Model : public ModelBase Model( size_t n_agents ) : agents( std::vector( int( n_agents ), AgentT() ) ) {} Model( std::vector && agents ) : agents( agents ) {} - void Agents_from_File( const std::string & file ) + void agents_from_file( const std::string & file ) override { agents.clear(); @@ -39,7 +39,7 @@ class Model : public ModelBase auto line = file_contents.substr( start_of_line, end_of_line - start_of_line ); start_of_line = end_of_line + 1; // TODO: check if empty or comment - if( line.size() == 0 ) + if( line.empty() ) { break; } @@ -57,7 +57,10 @@ class Model : public ModelBase } } - void iteration() override = 0; + void iteration() override + { + n_iterations++; + }; bool finished() override { @@ -77,10 +80,4 @@ class Model : public ModelBase } }; -template -void Seldon::Model::iteration() -{ - n_iterations++; -}; - } // namespace Seldon \ No newline at end of file diff --git a/include/model_base.hpp b/include/model_base.hpp index 86159ae..7612031 100644 --- a/include/model_base.hpp +++ b/include/model_base.hpp @@ -16,7 +16,8 @@ class ModelBase virtual AgentBase * get_agent( int idx ) = 0; // Use this to get an abstract representation of the agent at idx virtual void iteration() = 0; virtual bool finished() = 0; - virtual ~ModelBase() = default; + virtual void agents_from_file( const std::string & file ) = 0; + virtual ~ModelBase() = default; }; } // namespace Seldon \ No newline at end of file diff --git a/src/simulation.cpp b/src/simulation.cpp index 8adaeeb..6ac096f 100644 --- a/src/simulation.cpp +++ b/src/simulation.cpp @@ -90,7 +90,14 @@ Seldon::Simulation::Simulation( } else if( model_string == "ActivityDriven" ) { - auto model_activityDriven = std::make_unique( n_agents, *network, gen ); - model = 
std::move( model_activityDriven ); + auto model_activityDriven = std::make_unique( n_agents, *network, gen ); + model_activityDriven->max_iterations = max_iterations; + model = std::move( model_activityDriven ); + } + + if( cli_agent_file.has_value() ) + { + fmt::print( "Reading agents from file {}\n", cli_agent_file.value() ); + model->agents_from_file( cli_agent_file.value() ); } } \ No newline at end of file From d61f107d359d6080001a9af5372cdb7ba46dc970 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Tue, 10 Oct 2023 19:53:12 +0000 Subject: [PATCH 07/32] parse parameters for ActivityDriven model from conf --- include/models/ActivityDrivenModel.hpp | 14 +++++++++++--- src/models/ActivityDrivenModel.cpp | 4 +++- src/simulation.cpp | 18 ++++++++++-------- 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp index b6cace0..8c62122 100644 --- a/include/models/ActivityDrivenModel.hpp +++ b/include/models/ActivityDrivenModel.hpp @@ -4,6 +4,7 @@ #include "model.hpp" #include "network.hpp" #include +#include #include namespace Seldon @@ -21,6 +22,13 @@ inline std::string Agent::to_string() const return fmt::format( "{}, {}", data.opinion, data.activity ); }; +template<> +inline void Agent::from_string( const std::string & str ) +{ + // TODO + throw std::runtime_error( "Agent::from_string not implemented yet" ); +}; + class ActivityAgentModel : public Model> { using AgentT = Agent; @@ -31,9 +39,10 @@ class ActivityAgentModel : public Model> std::vector agents_current_copy; // Random number generation std::mt19937 & gen; // reference to simulation Mersenne-Twister engine + +public: // Model-specific parameters double dt = 0.1; // Timestep for the integration of the coupled ODEs - // Various free parameters int m = 10; // Number of agents contacted, when the agent is active double eps = 0.01; // Minimum activity epsilon; a_i belongs to [epsilon,1] @@ -45,8 +54,7 @@ class 
ActivityAgentModel : public Model> double reciprocity = 0.5; double K; // Social interaction strength; K>0 -public: - double convergence_tol = 1e-12; + double convergence_tol = 1e-12; // TODO: ?? ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen ); diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index 840c110..c79fcfd 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -23,5 +23,7 @@ Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, void Seldon::ActivityAgentModel::iteration() { - throw std::runtime_error( "ActivityAgentModel::iteration():: Not implemented!" ); + Model::iteration(); + + // throw std::runtime_error( "ActivityAgentModel::iteration():: Not implemented!" ); } \ No newline at end of file diff --git a/src/simulation.cpp b/src/simulation.cpp index 6ac096f..d7d1b77 100644 --- a/src/simulation.cpp +++ b/src/simulation.cpp @@ -79,18 +79,20 @@ Seldon::Simulation::Simulation( auto model_DeGroot = std::make_unique( n_agents, *network ); model_DeGroot->max_iterations = max_iterations; model_DeGroot->convergence_tol = convergence; - - // TODO: make this more general, ivoke on ModelBase or Model? 
- if( cli_agent_file.has_value() ) - { - fmt::print( "Reading agents from file {}\n", cli_agent_file.value() ); - model_DeGroot->Agents_from_File( cli_agent_file.value() ); - } - model = std::move( model_DeGroot ); + model = std::move( model_DeGroot ); } else if( model_string == "ActivityDriven" ) { auto model_activityDriven = std::make_unique( n_agents, *network, gen ); + model_activityDriven->dt = tbl["ActivityDriven"]["dt"].value_or( 0.1 ); + model_activityDriven->m = tbl["ActivityDriven"]["m"].value_or( 10 ); + model_activityDriven->eps = tbl["ActivityDriven"]["eps"].value_or( 0.01 ); + model_activityDriven->gamma = tbl["ActivityDriven"]["gamma"].value_or( 2.1 ); + model_activityDriven->homophily = tbl["ActivityDriven"]["homophily"].value_or( 0.0 ); + model_activityDriven->reciprocity = tbl["ActivityDriven"]["reciprocity"].value_or( 0.5 ); + model_activityDriven->alpha = tbl["ActivityDriven"]["alpha"].value_or( 1.0 ); + model_activityDriven->K = tbl["ActivityDriven"]["K"].value_or( 1.0 ); + model_activityDriven->max_iterations = max_iterations; model = std::move( model_activityDriven ); } From be1119d6d1c2b645409942fc2f952336e11bb5e5 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Tue, 10 Oct 2023 19:53:45 +0000 Subject: [PATCH 08/32] test_deGroot: removed unnecessary rng generator --- test/test_deGroot.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/test/test_deGroot.cpp b/test/test_deGroot.cpp index 309aecd..3284945 100644 --- a/test/test_deGroot.cpp +++ b/test/test_deGroot.cpp @@ -21,7 +21,6 @@ TEST_CASE( "Test the DeGroot Model Symmetric", "[DeGroot]" ) { 0.2, 0.8 }, }; - auto gen = std::mt19937(); auto network = Network( std::move( neighbour_list ), std::move( weight_list ) ); auto model = DeGrootModel( n_agents, network ); From 47747dbcb7581902abee42476e1c212395732cf9 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Tue, 10 Oct 2023 20:04:46 +0000 Subject: [PATCH 09/32] example conf for activity driven model --- 
examples/ActivityDriven/conf.toml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 examples/ActivityDriven/conf.toml diff --git a/examples/ActivityDriven/conf.toml b/examples/ActivityDriven/conf.toml new file mode 100644 index 0000000..b2fc11d --- /dev/null +++ b/examples/ActivityDriven/conf.toml @@ -0,0 +1,20 @@ +[simulation] +model = "ActivityDriven" +# rng_seed = 120 # Leaving this empty will pick a random seed + +[model] +max_iterations = 20 # If not set, max iterations is infinite + +[ActivityDriven] +dt = 0.1 # Timestep for the integration of the coupled ODEs +m = 10 # Number of agents contacted, when the agent is active +eps = 0.01 # Minimum activity epsilon; a_i belongs to [epsilon,1] +gamma = 2.1 # Exponent of activity power law distribution of activities +homophily = 0.0 # aka beta. if zero, agents pick their interaction partners at random +reciprocity = 0.5 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections +alpha = 1.0 # Controversialness of the issue, must be greater than 0. 
+K = 1.0 # Social interaction strength + +[network] +number_of_agents = 300 +connections_per_agent = 10 From 5479382115942e896e3867edf6733f95f59ea959 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Thu, 12 Oct 2023 20:53:14 +0000 Subject: [PATCH 10/32] Network: added some functions plus a unit test - set_neighbours_and_weights() - transpose() --- include/network.hpp | 7 ++++ meson.build | 3 +- src/network.cpp | 50 ++++++++++++++++++++------- test/test_network.cpp | 79 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 125 insertions(+), 14 deletions(-) create mode 100644 test/test_network.cpp diff --git a/include/network.hpp b/include/network.hpp index f433b63..7ea7e87 100644 --- a/include/network.hpp +++ b/include/network.hpp @@ -14,9 +14,16 @@ class Network Network( std::vector> && neighbour_list, std::vector> && weight_list ); std::size_t n_agents() const; + void get_neighbours( std::size_t agent_idx, std::vector & buffer ) const; void get_weights( std::size_t agent_idx, std::vector & buffer ) const; + void set_neighbours_and_weights( + std::size_t agent_idx, const std::vector & buffer_neighbours, + const std::vector & buffer_weights ); + + void transpose(); + private: std::vector> neighbour_list; // Neighbour list for the connections std::vector> weight_list; // List for the interaction weights of each connection diff --git a/meson.build b/meson.build index 36d17a8..8459dfc 100644 --- a/meson.build +++ b/meson.build @@ -23,7 +23,8 @@ exe = executable('seldon', sources_seldon + 'src/main.cpp', tests = [ ['Test Tarjan', 'test/test_tarjan.cpp'], - ['Test DeGroot', 'test/test_deGroot.cpp'] + ['Test DeGroot', 'test/test_deGroot.cpp'], + ['Test Netowrk', 'test/test_network.cpp'] ] Catch2 = dependency('Catch2', method : 'cmake', modules : ['Catch2::Catch2WithMain', 'Catch2::Catch2']) diff --git a/src/network.cpp b/src/network.cpp index 721809a..01ac92a 100644 --- a/src/network.cpp +++ b/src/network.cpp @@ -3,6 +3,7 @@ #include #include #include 
+#include Seldon::Network::Network( std::vector> && neighbour_list, std::vector> && weight_list ) @@ -28,22 +29,45 @@ size_t Seldon::Network::n_agents() const void Seldon::Network::get_neighbours( std::size_t agent_idx, std::vector & buffer ) const { - // TODO: rewrite this using std::span - const size_t n_edges = neighbour_list[agent_idx].size(); - buffer.resize( n_edges ); - for( size_t i_edge = 0; i_edge < n_edges; i_edge++ ) - { - buffer[i_edge] = neighbour_list[agent_idx][i_edge]; - } + buffer = neighbour_list[agent_idx]; +} + +void Seldon::Network::set_neighbours_and_weights( + std::size_t agent_idx, const std::vector & buffer_neighbours, + const std::vector & buffer_weights ) +{ + if( buffer_neighbours.size() != buffer_weights.size() ) + [[unlikely]] + { + throw std::runtime_error( + "Network::set_neighbours_and_weights: both buffers need to have the same length!" ); + } + + neighbour_list[agent_idx] = buffer_neighbours; + weight_list[agent_idx] = buffer_weights; } -void Seldon::Network::get_weights( std::size_t agent_idx, std::vector & buffer ) const +void Seldon::Network::get_weights( std::size_t agent_idx, std::vector & buffer ) const { - // TODO: rewrite this using std::span - const size_t n_edges = weight_list[agent_idx].size(); - buffer.resize( n_edges ); - for( size_t i_edge = 0; i_edge < n_edges; i_edge++ ) + buffer = weight_list[agent_idx]; +} + +void Seldon::Network::transpose() +{ + std::vector> neighbour_list_transpose( n_agents(), std::vector( 0 ) ); + std::vector> weight_list_transpose( n_agents(), std::vector( 0 ) ); + + for( size_t i_agent = 0; i_agent < n_agents(); i_agent++ ) { - buffer[i_edge] = weight_list[agent_idx][i_edge]; + for( size_t i_neighbour = 0; i_neighbour < neighbour_list[i_agent].size(); i_neighbour++ ) + { + const auto neighbour = neighbour_list[i_agent][i_neighbour]; + const auto weight = weight_list[i_agent][i_neighbour]; + neighbour_list_transpose[neighbour].push_back( i_agent ); + 
weight_list_transpose[neighbour].push_back( weight ); + } } + + neighbour_list = std::move( neighbour_list_transpose ); + weight_list = std::move( weight_list_transpose ); } \ No newline at end of file diff --git a/test/test_network.cpp b/test/test_network.cpp new file mode 100644 index 0000000..839ee8d --- /dev/null +++ b/test/test_network.cpp @@ -0,0 +1,79 @@ +#include "network.hpp" +#include "network_generation.hpp" +#include +#include +#include +#include + +TEST_CASE( "Testing the network class" ) +{ + using namespace Seldon; + + // Generate some network + const int n_agents = 20; + const int n_connections = 10; + std::mt19937 gen( 0 ); + auto network = generate_n_connections( n_agents, n_connections, gen ); + + // Does n_agents work? + REQUIRE( network->n_agents() == n_agents ); + + // Change the connections for agent 3 + std::vector buffer_n{ { 0, 10, 15 } }; + std::vector buffer_w{ 0.1, 0.2, 0.3 }; + network->set_neighbours_and_weights( 3, buffer_n, buffer_w ); + + // Make sure the changes worked + std::vector buffer_n_get{}; + std::vector buffer_w_get{}; + network->get_neighbours( 3, buffer_n_get ); + network->get_weights( 3, buffer_w_get ); + for( size_t i = 0; i < buffer_n_get.size(); i++ ) + { + // CAREFUL: this assumes that the order of edges has been preserved + // which at the moment seems reasonable ... 
but you never know + REQUIRE( buffer_n[i] == buffer_n_get[i] ); + REQUIRE( buffer_w[i] == buffer_w_get[i] ); + } + + // Now we test the transpose() function + + // First record all the old edges as tupels (i,j,w) where this edge goes from j -> i with weight w + std::set> old_edges; + for( size_t i_agent = 0; i_agent < network->n_agents(); i_agent++ ) + { + network->get_neighbours( i_agent, buffer_n ); + network->get_weights( i_agent, buffer_w ); + + for( size_t i_neighbour = 0; i_neighbour < buffer_n.size(); i_neighbour++ ) + { + auto neighbour = buffer_n[i_neighbour]; + auto weight = buffer_w[i_neighbour]; + std::tuple edge{ i_agent, neighbour, weight }; + old_edges.insert( edge ); + } + } + + network->transpose(); + + // Now we go over the transposed network and try to re-identify all edges + for( size_t i_agent = 0; i_agent < network->n_agents(); i_agent++ ) + { + network->get_neighbours( i_agent, buffer_n ); + network->get_weights( i_agent, buffer_w ); + + for( size_t i_neighbour = 0; i_neighbour < buffer_n.size(); i_neighbour++ ) + { + auto neighbour = buffer_n[i_neighbour]; + auto weight = buffer_w[i_neighbour]; + std::tuple edge{ + neighbour, i_agent, weight + }; // Note that i_agent and neighbour are flipped compared to before + + REQUIRE( old_edges.contains( edge ) ); // can we find the transposed edge? 
+ old_edges.extract( edge ); // extract the edge afterwards + } + } + + REQUIRE( old_edges.empty() ); +} \ No newline at end of file From 75083b6bafce31fbddc99e0c774f818cc125fa5d Mon Sep 17 00:00:00 2001 From: Amrita Goswami Date: Fri, 13 Oct 2023 20:53:12 +0000 Subject: [PATCH 11/32] model: Drawing activities from a power law model --- src/models/ActivityDrivenModel.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index c79fcfd..1f12e37 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -8,9 +8,10 @@ Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, agents_current_copy( std::vector( n_agents ) ), gen( gen ) { - // TODO: take this from simulation std::uniform_real_distribution<> dis( -1, 1 ); // Opinion initial values std::uniform_real_distribution<> dist_activity( 0, 1 ); // Uniform random variable for activities + gamma = 2.1; + eps = 0.01; // Initial conditions for the opinions, initialize to [-1,1] // The activities should be drawn from a power law distribution @@ -18,6 +19,7 @@ Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, { agents[i].data.opinion = dis( gen ); // Draw the opinion value // Draw from a power law distribution (1-gamma)/(1-eps^(1-gamma)) * a^(-gamma) + agents[i].data.activity = std::pow( (1-std::pow(eps,(1-gamma)) ) * dist_activity( gen ) + std::pow(eps,(1-gamma)) , (1/(1-gamma)) ); } } From 9ead9d63b16b3ff42484a73d44074cc1060bcec5 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Fri, 13 Oct 2023 21:09:21 +0000 Subject: [PATCH 12/32] ActivityDrivenModel: slight changes --- include/models/ActivityDrivenModel.hpp | 11 +++++++---- src/models/ActivityDrivenModel.cpp | 17 +++++++++++------ src/simulation.cpp | 12 +++++++----- 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/include/models/ActivityDrivenModel.hpp 
b/include/models/ActivityDrivenModel.hpp index 8c62122..0cd1dd3 100644 --- a/include/models/ActivityDrivenModel.hpp +++ b/include/models/ActivityDrivenModel.hpp @@ -42,23 +42,26 @@ class ActivityAgentModel : public Model> public: // Model-specific parameters - double dt = 0.1; // Timestep for the integration of the coupled ODEs + double dt = 0.01; // Timestep for the integration of the coupled ODEs // Various free parameters int m = 10; // Number of agents contacted, when the agent is active double eps = 0.01; // Minimum activity epsilon; a_i belongs to [epsilon,1] double gamma = 2.1; // Exponent of activity power law distribution of activities - double alpha; // Controversialness of the issue, must be greater than 0. - double homophily = 0; // aka beta. if zero, agents pick their interaction partners at random + double alpha = 3.0; // Controversialness of the issue, must be greater than 0. + double homophily = 0.5; // aka beta. if zero, agents pick their interaction partners at random // Reciprocity aka r. probability that when agent i contacts j via weighted reservoir sampling // j also sends feedback to i. So every agent can have more than m incoming connections double reciprocity = 0.5; - double K; // Social interaction strength; K>0 + double K = 3.0; // Social interaction strength; K>0 double convergence_tol = 1e-12; // TODO: ?? 
ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen ); + void get_agents_from_power_law(); // This needs to be called after eps and gamma have been set + void iteration() override; + // bool finished() override; }; diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index 1f12e37..b29bbf7 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -8,18 +8,23 @@ Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, agents_current_copy( std::vector( n_agents ) ), gen( gen ) { - std::uniform_real_distribution<> dis( -1, 1 ); // Opinion initial values + // Initial conditions for the opinions, initialize to [-1,1] +} + +void Seldon::ActivityAgentModel::get_agents_from_power_law() +{ + std::uniform_real_distribution<> dis_opinion( -1, 1 ); // Opinion initial values std::uniform_real_distribution<> dist_activity( 0, 1 ); // Uniform random variable for activities - gamma = 2.1; - eps = 0.01; - // Initial conditions for the opinions, initialize to [-1,1] // The activities should be drawn from a power law distribution for( size_t i = 0; i < agents.size(); i++ ) { - agents[i].data.opinion = dis( gen ); // Draw the opinion value + agents[i].data.opinion = dis_opinion( gen ); // Draw the opinion value + // Draw from a power law distribution (1-gamma)/(1-eps^(1-gamma)) * a^(-gamma) - agents[i].data.activity = std::pow( (1-std::pow(eps,(1-gamma)) ) * dist_activity( gen ) + std::pow(eps,(1-gamma)) , (1/(1-gamma)) ); + agents[i].data.activity = std::pow( + ( 1 - std::pow( eps, ( 1 - gamma ) ) ) * dist_activity( gen ) + std::pow( eps, ( 1 - gamma ) ), + ( 1 / ( 1 - gamma ) ) ); } } diff --git a/src/simulation.cpp b/src/simulation.cpp index d7d1b77..0878403 100644 --- a/src/simulation.cpp +++ b/src/simulation.cpp @@ -84,17 +84,19 @@ Seldon::Simulation::Simulation( else if( model_string == "ActivityDriven" ) { auto
model_activityDriven = std::make_unique( n_agents, *network, gen ); - model_activityDriven->dt = tbl["ActivityDriven"]["dt"].value_or( 0.1 ); + model_activityDriven->dt = tbl["ActivityDriven"]["dt"].value_or( 0.01 ); model_activityDriven->m = tbl["ActivityDriven"]["m"].value_or( 10 ); model_activityDriven->eps = tbl["ActivityDriven"]["eps"].value_or( 0.01 ); model_activityDriven->gamma = tbl["ActivityDriven"]["gamma"].value_or( 2.1 ); - model_activityDriven->homophily = tbl["ActivityDriven"]["homophily"].value_or( 0.0 ); + model_activityDriven->homophily = tbl["ActivityDriven"]["homophily"].value_or( 0.5 ); model_activityDriven->reciprocity = tbl["ActivityDriven"]["reciprocity"].value_or( 0.5 ); - model_activityDriven->alpha = tbl["ActivityDriven"]["alpha"].value_or( 1.0 ); - model_activityDriven->K = tbl["ActivityDriven"]["K"].value_or( 1.0 ); + model_activityDriven->alpha = tbl["ActivityDriven"]["alpha"].value_or( 3.0 ); + model_activityDriven->K = tbl["ActivityDriven"]["K"].value_or( 3.0 ); model_activityDriven->max_iterations = max_iterations; - model = std::move( model_activityDriven ); + + model_activityDriven->get_agents_from_power_law(); + model = std::move( model_activityDriven ); } if( cli_agent_file.has_value() ) From 5fb34edd6e3698e4dc9e32c53c6b50de73889f99 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Fri, 13 Oct 2023 21:10:21 +0000 Subject: [PATCH 13/32] meson.build: fixed typo --- meson.build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/meson.build b/meson.build index 8459dfc..058cf0b 100644 --- a/meson.build +++ b/meson.build @@ -24,7 +24,7 @@ exe = executable('seldon', sources_seldon + 'src/main.cpp', tests = [ ['Test Tarjan', 'test/test_tarjan.cpp'], ['Test DeGroot', 'test/test_deGroot.cpp'], - ['Test Netowrk', 'test/test_network.cpp'] + ['Test Network', 'test/test_network.cpp'] ] Catch2 = dependency('Catch2', method : 'cmake', modules : ['Catch2::Catch2WithMain', 'Catch2::Catch2']) From 
00c3963b3cbce85674a7fe382f0b69cbfc5196ee Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Fri, 13 Oct 2023 21:17:40 +0000 Subject: [PATCH 14/32] moved draw_unique_k_from_n to util/math.hpp --- include/util/math.hpp | 56 ++++++++++++++++++++++++++++++++++++++ src/network_generation.cpp | 49 ++------------------------------- 2 files changed, 58 insertions(+), 47 deletions(-) create mode 100644 include/util/math.hpp diff --git a/include/util/math.hpp b/include/util/math.hpp new file mode 100644 index 0000000..22528b3 --- /dev/null +++ b/include/util/math.hpp @@ -0,0 +1,56 @@ +#pragma once +#include +#include +#include +#include + +namespace Seldon +{ + +// Function for getting a vector of k agents (corresponding to connections) +// drawing from n agents (without duplication) +// ignore_idx ignores the index of the agent itself, since we will later add the agent itself ourselves to prevent duplication +inline void draw_unique_k_from_n( + std::size_t ignore_idx, std::size_t k, std::size_t n, std::vector & buffer, std::mt19937 & gen ) +{ + struct SequenceGenerator + { + /* An iterator that generates a sequence of integers 2, 3, 4 ...*/ + using iterator_category = std::forward_iterator_tag; + using difference_type = std::ptrdiff_t; + using value_type = size_t; + using pointer = size_t *; // or also value_type* + using reference = size_t &; + + SequenceGenerator( const size_t i_, const size_t ignore_idx ) : i( i_ ), ignore_idx( ignore_idx ) + { + if( i == ignore_idx ) + { + i++; + } + } + size_t i; + size_t ignore_idx; + + size_t & operator*() + { + return i; + }; + bool operator==( const SequenceGenerator & it1 ) + { + return i == it1.i; + }; + SequenceGenerator & operator++() + { + i++; + if( i == ignore_idx ) + i++; + return *this; + } + }; + + buffer.resize( k ); + std::sample( SequenceGenerator( 0, ignore_idx ), SequenceGenerator( n, ignore_idx ), buffer.begin(), k, gen ); +} + +} // namespace Seldon \ No newline at end of file diff --git 
a/src/network_generation.cpp b/src/network_generation.cpp index a8c9a88..0580068 100644 --- a/src/network_generation.cpp +++ b/src/network_generation.cpp @@ -8,54 +8,9 @@ #include #include #include +#include #include -// Function for getting a vector of k agents (corresponding to connections) -// drawing from n agents (without duplication) -// ignore_idx ignores the index of the agent itself, since we will later add the agent itself ourselves to prevent duplication -void draw_unique_k_from_n( - std::size_t ignore_idx, std::size_t k, std::size_t n, std::vector & buffer, std::mt19937 & gen ) -{ - struct SequenceGenerator - { - /* An iterator that generates a sequence of integers 2, 3, 4 ...*/ - using iterator_category = std::forward_iterator_tag; - using difference_type = std::ptrdiff_t; - using value_type = size_t; - using pointer = size_t *; // or also value_type* - using reference = size_t &; - - SequenceGenerator( const size_t i_, const size_t ignore_idx ) : i( i_ ), ignore_idx( ignore_idx ) - { - if( i == ignore_idx ) - { - i++; - } - } - size_t i; - size_t ignore_idx; - - size_t & operator*() - { - return i; - }; - bool operator==( const SequenceGenerator & it1 ) - { - return i == it1.i; - }; - SequenceGenerator & operator++() - { - i++; - if( i == ignore_idx ) - i++; - return *this; - } - }; - - buffer.resize( k ); - std::sample( SequenceGenerator( 0, ignore_idx ), SequenceGenerator( n, ignore_idx ), buffer.begin(), k, gen ); -} - std::unique_ptr Seldon::generate_n_connections( int n_agents, int n_connections, std::mt19937 & gen ) { using WeightT = Network::WeightT; @@ -78,7 +33,7 @@ std::unique_ptr Seldon::generate_n_connections( int n_agents, i // Get the vector of sorted adjacencies, excluding i (include i later) // TODO: option for making the n_conections variable - draw_unique_k_from_n( i_agent, n_connections, n_agents, incoming_neighbour_buffer, gen ); + Seldon::draw_unique_k_from_n( i_agent, n_connections, n_agents, incoming_neighbour_buffer, gen ); 
incoming_neighbour_weights.resize( incoming_neighbour_buffer.size() ); for( size_t j = 0; j < incoming_neighbour_buffer.size(); ++j ) From 59357ae76d430b2ddb790275a30186d5a74ac3aa Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Fri, 13 Oct 2023 21:26:14 +0000 Subject: [PATCH 15/32] Network: created an overload of set_neighbours_and_weights that sets all neighbours to a uniform weight --- include/network.hpp | 3 +++ src/network.cpp | 12 ++++++++++++ 2 files changed, 15 insertions(+) diff --git a/include/network.hpp b/include/network.hpp index 7ea7e87..a98b5cf 100644 --- a/include/network.hpp +++ b/include/network.hpp @@ -18,6 +18,9 @@ class Network void get_neighbours( std::size_t agent_idx, std::vector & buffer ) const; void get_weights( std::size_t agent_idx, std::vector & buffer ) const; + void set_neighbours_and_weights( + std::size_t agent_idx, const std::vector & buffer_neighbours, const WeightT & weight ); + void set_neighbours_and_weights( std::size_t agent_idx, const std::vector & buffer_neighbours, const std::vector & buffer_weights ); diff --git a/src/network.cpp b/src/network.cpp index 01ac92a..7147db2 100644 --- a/src/network.cpp +++ b/src/network.cpp @@ -32,6 +32,18 @@ void Seldon::Network::get_neighbours( std::size_t agent_idx, std::vector buffer = neighbour_list[agent_idx]; } +void Seldon::Network::set_neighbours_and_weights( + std::size_t agent_idx, const std::vector & buffer_neighbours, const WeightT & weight ) +{ + neighbour_list[agent_idx] = buffer_neighbours; + + weight_list[agent_idx].resize( buffer_neighbours.size() ); + for( auto & w : weight_list[agent_idx] ) + { + w = weight; + } +} + void Seldon::Network::set_neighbours_and_weights( std::size_t agent_idx, const std::vector & buffer_neighbours, const std::vector & buffer_weights ) From 15356066d2b14b14cc8141abbbdd29124ba33d42 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Fri, 13 Oct 2023 21:51:53 +0000 Subject: [PATCH 16/32] WIP: iteration of activity driven model
Co-authored-by: Amrita Goswami --- include/models/ActivityDrivenModel.hpp | 12 +++--- src/models/ActivityDrivenModel.cpp | 58 +++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 7 deletions(-) diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp index 0cd1dd3..ffbe535 100644 --- a/include/models/ActivityDrivenModel.hpp +++ b/include/models/ActivityDrivenModel.hpp @@ -44,15 +44,15 @@ class ActivityAgentModel : public Model> // Model-specific parameters double dt = 0.01; // Timestep for the integration of the coupled ODEs // Various free parameters - int m = 10; // Number of agents contacted, when the agent is active - double eps = 0.01; // Minimum activity epsilon; a_i belongs to [epsilon,1] - double gamma = 2.1; // Exponent of activity power law distribution of activities - double alpha = 3.0; // Controversialness of the issue, must be greater than 0. - double homophily = 0.5; // aka beta. if zero, agents pick their interaction partners at random + int m = 10; // Number of agents contacted, when the agent is active + double eps = 0.01; // Minimum activity epsilon; a_i belongs to [epsilon,1] + double gamma = 2.1; // Exponent of activity power law distribution of activities + double alpha = 3.0; // Controversialness of the issue, must be greater than 0. + double homophily = 0.5; // aka beta. if zero, agents pick their interaction partners at random // Reciprocity aka r. probability that when agent i contacts j via weighted reservoir sampling // j also sends feedback to i. So every agent can have more than m incoming connections double reciprocity = 0.5; - double K = 3.0; // Social interaction strength; K>0 + double K = 3.0; // Social interaction strength; K>0 double convergence_tol = 1e-12; // TODO: ?? 
diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index b29bbf7..62bdcb8 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -1,4 +1,6 @@ #include "models/ActivityDrivenModel.hpp" +#include "util/math.hpp" +#include #include #include @@ -32,5 +34,59 @@ void Seldon::ActivityAgentModel::iteration() { Model::iteration(); - // throw std::runtime_error( "ActivityAgentModel::iteration():: Not implemented!" ); + std::uniform_real_distribution<> dis_activation( 0.0, 1.0 ); // Opinion initial values + std::vector contacted_agents{}; + + for( size_t idx_agent = 0; idx_agent < network.n_agents(); idx_agent++ ) + { + // Test if the agent is activated + bool activated = dis_activation( gen ) < agents[idx_agent].data.activity; + + if( activated ) + { + // Contact m_agents according to the homophily distribution + // TODO: use homophily stuff instead of uniform + Seldon::draw_unique_k_from_n( + idx_agent, m, network.n_agents(), contacted_agents, + gen ); // now contacted_agents contains the indices of the contacted agents + // Set the *outgoing* edges + network.set_neighbours_and_weights( idx_agent, contacted_agents, 1.0 ); + } + else + { + network.set_neighbours_and_weights( idx_agent, {}, {} ); + } + + // TODO: implement reciprocity + // ... + } + + network.transpose(); // transpose the network, so that we have incoming edges + + // Integrate the ODE + auto neighbour_buffer = std::vector(); + size_t j_index = 0; + + agents_current_copy = agents; // Set the copy to the current state of agents. 
TODO: this is somewhat wasteful since + // activities are not changing + + for( size_t i = 0; i < network.n_agents(); i++ ) + { + network.get_neighbours( i, neighbour_buffer ); // Get the incoming neighbours + for( size_t j = 0; j < neighbour_buffer.size(); j++ ) + { + // TODO: currently this uses an euler integration -> use RK4 + j_index = neighbour_buffer[j]; + agents_current_copy[i].data.opinion + += dt * ( -agents[i].data.opinion + K * ( std::tanh( alpha * agents[j_index].data.opinion ) ) ); + } + } + + // Update the agents from the copy + for( std::size_t i = 0; i < agents.size(); i++ ) + { + max_opinion_diff + = std::max( max_opinion_diff, std::abs( agents[i].data.opinion - agents_current_copy[i].data.opinion ) ); + agents[i] = agents_current_copy[i]; + } } \ No newline at end of file From 7802184d8df8fa5750b8a02b5860500c82274d0b Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Fri, 13 Oct 2023 21:52:11 +0000 Subject: [PATCH 17/32] changed default values in activity driven model example Co-authored-by: Amrita Goswami --- examples/ActivityDriven/conf.toml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/ActivityDriven/conf.toml b/examples/ActivityDriven/conf.toml index b2fc11d..699eedf 100644 --- a/examples/ActivityDriven/conf.toml +++ b/examples/ActivityDriven/conf.toml @@ -6,15 +6,15 @@ model = "ActivityDriven" max_iterations = 20 # If not set, max iterations is infinite [ActivityDriven] -dt = 0.1 # Timestep for the integration of the coupled ODEs +dt = 0.01 # Timestep for the integration of the coupled ODEs m = 10 # Number of agents contacted, when the agent is active eps = 0.01 # Minimum activity epsilon; a_i belongs to [epsilon,1] gamma = 2.1 # Exponent of activity power law distribution of activities -homophily = 0.0 # aka beta. if zero, agents pick their interaction partners at random reciprocity = 0.5 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. 
So every agent can have more than m incoming connections -alpha = 1.0 # Controversialness of the issue, must be greater than 0. -K = 1.0 # Social interaction strength +homophily = 0.5 # aka beta. if zero, agents pick their interaction partners at random +alpha = 3.0 # Controversialness of the issue, must be greater than 0. +K = 3.0 # Social interaction strength [network] -number_of_agents = 300 +number_of_agents = 1000 connections_per_agent = 10 From 988304c8c01357f78022ef9dff848f58e11fd2df Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Sat, 14 Oct 2023 20:14:05 +0000 Subject: [PATCH 18/32] Network: added a push_back_neighbour_and_weight function Co-authored-by: Amrita Goswami --- include/network.hpp | 2 ++ src/network.cpp | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/include/network.hpp b/include/network.hpp index a98b5cf..1ab892a 100644 --- a/include/network.hpp +++ b/include/network.hpp @@ -25,6 +25,8 @@ class Network std::size_t agent_idx, const std::vector & buffer_neighbours, const std::vector & buffer_weights ); + void push_back_neighbour_and_weight(size_t i, size_t j, WeightT w); + void transpose(); private: diff --git a/src/network.cpp b/src/network.cpp index 7147db2..15b5f5e 100644 --- a/src/network.cpp +++ b/src/network.cpp @@ -59,6 +59,12 @@ void Seldon::Network::set_neighbours_and_weights( weight_list[agent_idx] = buffer_weights; } +void Seldon::Network::push_back_neighbour_and_weight( size_t i, size_t j, WeightT w ) +{ + neighbour_list[i].push_back( j ); + weight_list[i].push_back( w ); +} + void Seldon::Network::get_weights( std::size_t agent_idx, std::vector & buffer ) const { buffer = weight_list[agent_idx]; From 50c8cdec0bc400d8405df0857e9f53908dd19426 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Sat, 14 Oct 2023 20:14:20 +0000 Subject: [PATCH 19/32] ActivityDrivenModel: reciprocity --- include/models/ActivityDrivenModel.hpp | 4 ++++ src/models/ActivityDrivenModel.cpp | 33 +++++++++++++++++++++++--- 2 files changed, 34 
insertions(+), 3 deletions(-) diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp index ffbe535..22c25b3 100644 --- a/include/models/ActivityDrivenModel.hpp +++ b/include/models/ActivityDrivenModel.hpp @@ -3,8 +3,11 @@ #include "agent.hpp" #include "model.hpp" #include "network.hpp" +#include #include +#include #include +#include #include namespace Seldon @@ -39,6 +42,7 @@ class ActivityAgentModel : public Model> std::vector agents_current_copy; // Random number generation std::mt19937 & gen; // reference to simulation Mersenne-Twister engine + std::set> reciprocal_edge_buffer{}; public: // Model-specific parameters diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index 62bdcb8..277a9af 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -34,9 +34,12 @@ void Seldon::ActivityAgentModel::iteration() { Model::iteration(); - std::uniform_real_distribution<> dis_activation( 0.0, 1.0 ); // Opinion initial values + std::uniform_real_distribution<> dis_activation( 0.0, 1.0 ); // Opinion initial values + std::uniform_real_distribution<> dis_reciprocation( 0.0, 1.0 ); // Opinion initial values std::vector contacted_agents{}; + reciprocal_edge_buffer.clear(); // Clear the reciprocal edge buffer + for( size_t idx_agent = 0; idx_agent < network.n_agents(); idx_agent++ ) { // Test if the agent is activated @@ -49,6 +52,14 @@ void Seldon::ActivityAgentModel::iteration() Seldon::draw_unique_k_from_n( idx_agent, m, network.n_agents(), contacted_agents, gen ); // now contacted_agents contains the indices of the contacted agents + + // Fill the outgoing edges into the reciprocal edge buffer + for( const auto & idx_outgoing : contacted_agents ) + { + reciprocal_edge_buffer.insert( + { idx_agent, idx_outgoing } ); // insert the edge idx_agent -> idx_outgoing + } + // Set the *outgoing* edges network.set_neighbours_and_weights( idx_agent, contacted_agents, 1.0 ); } @@ 
-56,9 +67,25 @@ void Seldon::ActivityAgentModel::iteration() { network.set_neighbours_and_weights( idx_agent, {}, {} ); } + } - // TODO: implement reciprocity - // ... + // Reciprocity check + for( size_t idx_agent = 0; idx_agent < network.n_agents(); idx_agent++ ) + { + // Get the outgoing edges + network.get_neighbours( idx_agent, contacted_agents ); + // For each outgoing edge we check if the reverse edge already exists + for( const auto & idx_outgoing : contacted_agents ) + { + // If the edge is not reciprocated + if( !reciprocal_edge_buffer.contains( { idx_outgoing, idx_agent } ) ) + { + if( dis_reciprocation( gen ) < reciprocity ) + { + network.push_back_neighbour_and_weight( idx_outgoing, idx_agent, 1.0 ); + } + } + } } network.transpose(); // transpose the network, so that we have incoming edges From 9fbe749f3c2ae48d239332685809ee03c12b7fb8 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Sun, 15 Oct 2023 21:10:08 +0000 Subject: [PATCH 20/32] implemented weighted reservoir sampling in util/math.hpp --- include/util/math.hpp | 48 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/include/util/math.hpp b/include/util/math.hpp index 22528b3..aa5acf9 100644 --- a/include/util/math.hpp +++ b/include/util/math.hpp @@ -1,7 +1,9 @@ #pragma once #include #include +#include #include +#include #include namespace Seldon @@ -53,4 +55,50 @@ inline void draw_unique_k_from_n( std::sample( SequenceGenerator( 0, ignore_idx ), SequenceGenerator( n, ignore_idx ), buffer.begin(), k, gen ); } +template +void reservoir_sampling_A_ExpJ( + size_t k, size_t n, WeightCallbackT weight, std::vector & buffer, std::mt19937 & mt ) +{ + std::uniform_real_distribution distribution( 0.0, 1.0 ); + + std::vector reservoir( k ); + using QueueItemT = std::pair; + + auto compare = []( const QueueItemT & item1, const QueueItemT & item2 ) { return item1.second > item2.second; }; + std::priority_queue, decltype( compare )> H; + + size_t idx = 0; + while( 
idx < n && H.size() < k ) + { + double r = std::pow( distribution( mt ), 1.0 / weight( idx ) ); + H.push( { idx, r } ); + idx++; + } + + auto X = std::log( distribution( mt ) ) / std::log( H.top().second ); + while( idx < n ) + { + auto w = weight( idx ); + X -= w; + if( X <= 0 ) + { + auto t = std::pow( H.top().second, w ); + auto uniform_from_t_to_one = distribution( mt ) * ( 1.0 - t ) + t; // Random number in interval [t, 1.0] + auto r = std::pow( uniform_from_t_to_one, 1.0 / w ); + H.pop(); + H.push( { idx, r } ); + X = std::log( distribution( mt ) ) / std::log( H.top().second ); + } + idx++; + } + + buffer.resize( H.size() ); + + for( size_t i = 0; i < k; i++ ) + { + buffer[i] = H.top().first; + H.pop(); + } +} + } // namespace Seldon \ No newline at end of file From 6b83cc646ec6d4969eff81b4d5bd7f714d2d019d Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Sun, 15 Oct 2023 21:10:34 +0000 Subject: [PATCH 21/32] ActivityDrivenModel: implemented homophily --- src/models/ActivityDrivenModel.cpp | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index 277a9af..498d159 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -47,11 +47,17 @@ void Seldon::ActivityAgentModel::iteration() if( activated ) { - // Contact m_agents according to the homophily distribution - // TODO: use homophily stuff instead of uniform - Seldon::draw_unique_k_from_n( - idx_agent, m, network.n_agents(), contacted_agents, - gen ); // now contacted_agents contains the indices of the contacted agents + + // Implement the weight for the probability of agent `idx_agent` contacting agent `j` + // Not normalised since, this is taken care of by the reservoir sampling + auto weight_callback = [idx_agent, this]( size_t j ) { + if( idx_agent == j ) // The agent does not contact itself + return 0.0; + return std::pow( + std::abs(
this->agents[idx_agent].data.opinion - this->agents[j].data.opinion ), this->homophily ); + }; + + Seldon::reservoir_sampling_A_ExpJ( m, network.n_agents(), weight_callback, contacted_agents, gen ); // Fill the outgoing edges into the reciprocal edge buffer for( const auto & idx_outgoing : contacted_agents ) From 32c2ce46473f8c21ed911dc253ea3a372df638bc Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Sun, 15 Oct 2023 21:12:09 +0000 Subject: [PATCH 22/32] ActivityDrivenModel: fixed sign in homophily --- src/models/ActivityDrivenModel.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index 498d159..ca256bc 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -49,12 +49,12 @@ void Seldon::ActivityAgentModel::iteration() { // Implement the weight for the probability of agent `idx_agent` contacting agent `j` - // Not normalised since, this is taken care of by the reservoir sampling + // Not normalised since this is taken care of by the reservoir sampling auto weight_callback = [idx_agent, this]( size_t j ) { if( idx_agent == j ) // The agent does not contact itself return 0.0; return std::pow( - std::abs( this->agents[idx_agent].data.opinion - this->agents[j].data.opinion ), this->homophily ); + std::abs( this->agents[idx_agent].data.opinion - this->agents[j].data.opinion ), -this->homophily ); }; Seldon::reservoir_sampling_A_ExpJ( m, network.n_agents(), weight_callback, contacted_agents, gen ); From 73c317c8a930850d1c9ed4815687226bd1344ab3 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Sun, 15 Oct 2023 21:25:43 +0000 Subject: [PATCH 23/32] moved power_law_distribution to utility/math.hpp --- include/util/math.hpp | 27 +++++++++++++++++++++++++++ src/models/ActivityDrivenModel.cpp | 8 +++----- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/include/util/math.hpp b/include/util/math.hpp index aa5acf9..1d68856 
100644 --- a/include/util/math.hpp +++ b/include/util/math.hpp @@ -101,4 +101,31 @@ void reservoir_sampling_A_ExpJ( } } +/** + * @brief Power law distribution for random numbers. + * A continuous random distribution on the range [eps, infty) with equal + * distribution + * p(x) = (1-gamma)/(1-eps^(1-gamma)) * x^(-gamma) + */ +template +class power_law_distribution +{ +private: + ScalarT eps; + ScalarT gamma; + std::uniform_real_distribution dist + = std::uniform_real_distribution( 0.0, 1.0 ); // Uniform random variable for activities + +public: + power_law_distribution( ScalarT eps, ScalarT gamma ) : eps( eps ), gamma( gamma ) {} + + template + ScalarT operator()( Generator & gen ) + { + return std::pow( + ( 1.0 - std::pow( eps, ( 1.0 - gamma ) ) ) * dist( gen ) + std::pow( eps, ( 1.0 - gamma ) ), + ( 1.0 / ( 1.0 - gamma ) ) ); + } +}; + } // namespace Seldon \ No newline at end of file diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index ca256bc..bea8806 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -15,8 +15,8 @@ Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, void Seldon::ActivityAgentModel::get_agents_from_power_law() { - std::uniform_real_distribution<> dis_opinion( -1, 1 ); // Opinion initial values - std::uniform_real_distribution<> dist_activity( 0, 1 ); // Uniform random variable for activities + std::uniform_real_distribution<> dis_opinion( -1, 1 ); // Opinion initial values + power_law_distribution<> dist_activity( eps, gamma ); // The activities should be drawn from a power law distribution for( size_t i = 0; i < agents.size(); i++ ) @@ -24,9 +24,7 @@ void Seldon::ActivityAgentModel::get_agents_from_power_law() agents[i].data.opinion = dis_opinion( gen ); // Draw the opinion value // Draw from a power law distribution (1-gamma)/(1-eps^(1-gamma)) * a^(-gamma) - agents[i].data.activity = std::pow( - ( 1 - std::pow( eps, ( 1 - gamma ) ) ) * 
dist_activity( gen ) + std::pow( eps, ( 1 - gamma ) ), - ( 1 / ( 1 - gamma ) ) ); + agents[i].data.activity = dist_activity( gen ); } } From 9c630f83bb074e41de2e26a6bb85dc3b0e5ca85d Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Sun, 15 Oct 2023 21:29:53 +0000 Subject: [PATCH 24/32] ActivityDrivenModel: some commment changes --- src/models/ActivityDrivenModel.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index bea8806..209ff76 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -9,20 +9,18 @@ Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, network( network ), agents_current_copy( std::vector( n_agents ) ), gen( gen ) -{ - // Initial conditions for the opinions, initialize to [-1,1] -} +{ } void Seldon::ActivityAgentModel::get_agents_from_power_law() { std::uniform_real_distribution<> dis_opinion( -1, 1 ); // Opinion initial values power_law_distribution<> dist_activity( eps, gamma ); + // Initial conditions for the opinions, initialize to [-1,1] // The activities should be drawn from a power law distribution for( size_t i = 0; i < agents.size(); i++ ) { agents[i].data.opinion = dis_opinion( gen ); // Draw the opinion value - // Draw from a power law distribution (1-gamma)/(1-eps^(1-gamma)) * a^(-gamma) agents[i].data.activity = dist_activity( gen ); } From b4988d5c67124be0eeb30dba25dca6d2fd5f6362 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Sun, 15 Oct 2023 21:39:41 +0000 Subject: [PATCH 25/32] Implemented ActivityAgent::from_string --- include/models/ActivityDrivenModel.hpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp index 22c25b3..ffc5d5d 100644 --- a/include/models/ActivityDrivenModel.hpp +++ b/include/models/ActivityDrivenModel.hpp @@ -28,8 +28,9 @@ inline 
std::string Agent::to_string() const template<> inline void Agent::from_string( const std::string & str ) { - // TODO - throw std::runtime_error( "Agent::from_string not implemented yet" ); + auto pos_comma = str.find_first_of( ',' ); + data.opinion = std::stod( str.substr( 0, pos_comma ) ); + data.activity = std::stod( str.substr( pos_comma+1, str.size() ) ); }; class ActivityAgentModel : public Model> From 3fab1c01f60082d4a6237373652d1ba99e5a1a8e Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Sun, 15 Oct 2023 21:46:18 +0000 Subject: [PATCH 26/32] Small comment change in math.hpp --- include/util/math.hpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/include/util/math.hpp b/include/util/math.hpp index 1d68856..a69b575 100644 --- a/include/util/math.hpp +++ b/include/util/math.hpp @@ -103,8 +103,9 @@ void reservoir_sampling_A_ExpJ( /** * @brief Power law distribution for random numbers. - * A continuous random distribution on the range [eps, infty) with equal - * distribution + * A continuous random distribution on the range [eps, infty) + * with p(x) ~ x^(-gamma) + * Including normalization the PDF is * p(x) = (1-gamma)/(1-eps^(1-gamma)) * x^(-gamma) */ template From f4e5307d60d60d31b4e132764e71ae0054007fec Mon Sep 17 00:00:00 2001 From: Amrita Goswami Date: Sun, 15 Oct 2023 21:50:07 +0000 Subject: [PATCH 27/32] Runge kutta --- include/models/ActivityDrivenModel.hpp | 3 + include/network.hpp | 2 +- src/main.cpp | 2 +- src/models/ActivityDrivenModel.cpp | 77 +++++++++++++++++++++----- src/network.cpp | 10 ++-- 5 files changed, 72 insertions(+), 22 deletions(-) diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp index ffc5d5d..cfa7d25 100644 --- a/include/models/ActivityDrivenModel.hpp +++ b/include/models/ActivityDrivenModel.hpp @@ -45,6 +45,9 @@ class ActivityAgentModel : public Model> std::mt19937 & gen; // reference to simulation Mersenne-Twister engine std::set> reciprocal_edge_buffer{}; 
+ void get_euler_slopes( std::vector & k_previous, double factor, std::vector & k_buffer ); + void get_euler_slopes( std::vector & k_buffer ); + public: // Model-specific parameters double dt = 0.01; // Timestep for the integration of the coupled ODEs diff --git a/include/network.hpp b/include/network.hpp index 1ab892a..fb602be 100644 --- a/include/network.hpp +++ b/include/network.hpp @@ -25,7 +25,7 @@ class Network std::size_t agent_idx, const std::vector & buffer_neighbours, const std::vector & buffer_weights ); - void push_back_neighbour_and_weight(size_t i, size_t j, WeightT w); + void push_back_neighbour_and_weight( size_t i, size_t j, WeightT w ); void transpose(); diff --git a/src/main.cpp b/src/main.cpp index bd443ee..ee4be9d 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -42,7 +42,7 @@ int main( int argc, char * argv[] ) auto simulation = Seldon::Simulation( config_file_path.string(), network_file, agent_file ); - fmt::print( "{}", simulation.model->get_agent(0)->to_string() ); + fmt::print( "{}", simulation.model->get_agent( 0 )->to_string() ); Seldon::IO::network_to_dot_file( *simulation.network, ( output_dir_path / fs::path( "network.dot" ) ).string() ); Seldon::IO::network_to_file( simulation, ( output_dir_path / fs::path( "network.txt" ) ).string() ); diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index 209ff76..b1ea650 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -46,7 +46,8 @@ void Seldon::ActivityAgentModel::iteration() // Implement the weight for the probability of agent `idx_agent` contacting agent `j` // Not normalised since this is taken care of by the reservoir sampling - auto weight_callback = [idx_agent, this]( size_t j ) { + auto weight_callback = [idx_agent, this]( size_t j ) + { if( idx_agent == j ) // The agent does not contact itself return 0.0; return std::pow( @@ -92,30 +93,78 @@ void Seldon::ActivityAgentModel::iteration() network.transpose(); // 
transpose the network, so that we have incoming edges - // Integrate the ODE + // Integrate the ODE using 4th order Runge-Kutta + auto k1_buffer = std::vector(); + auto k2_buffer = std::vector(); + auto k3_buffer = std::vector(); + auto k4_buffer = std::vector(); + + // k_1 = hf(x_n,y_n) + get_euler_slopes( k1_buffer ); + + // k_2 = hf(x_n+1/2h,y_n+1/2k_1) + get_euler_slopes( k1_buffer, 0.5, k2_buffer ); + + // k_3 = hf(x_n+1/2h,y_n+1/2k_2) + get_euler_slopes( k2_buffer, 0.5, k3_buffer ); + + // k_4 = hf(x_n+h,y_n+k_3) + get_euler_slopes( k3_buffer, 1.0, k4_buffer ); + + // Update the agent opinions + for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent ) + { + // y_(n+1) = y_n+1/6k_1+1/3k_2+1/3k_3+1/6k_4+O(h^5) + agents[idx_agent].data.opinion + += ( k1_buffer[idx_agent] + 2 * k2_buffer[idx_agent] + 2 * k3_buffer[idx_agent] + k4_buffer[idx_agent] ) + / 6.0; + } +} + +void Seldon::ActivityAgentModel::get_euler_slopes( std::vector & k_buffer ) +{ + // k_1 = hf(x_n,y_n) + // h is the timestep auto neighbour_buffer = std::vector(); size_t j_index = 0; - agents_current_copy = agents; // Set the copy to the current state of agents. 
TODO: this is somewhat wasteful since - // activities are not changing + k_buffer.resize( network.n_agents() ); - for( size_t i = 0; i < network.n_agents(); i++ ) + for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent ) { - network.get_neighbours( i, neighbour_buffer ); // Get the incoming neighbours + network.get_neighbours( idx_agent, neighbour_buffer ); // Get the incoming neighbours + k_buffer[idx_agent] = -agents[idx_agent].data.opinion; + // Loop through neighbouring agents for( size_t j = 0; j < neighbour_buffer.size(); j++ ) { - // TODO: currently this uses an euler integration -> use RK4 j_index = neighbour_buffer[j]; - agents_current_copy[i].data.opinion - += dt * ( -agents[i].data.opinion + K * ( std::tanh( alpha * agents[j_index].data.opinion ) ) ); + k_buffer[idx_agent] += K * ( std::tanh( alpha * agents[j_index].data.opinion ) ); } + // Multiply by the timestep + k_buffer[idx_agent] *= dt; } +} + +void Seldon::ActivityAgentModel::get_euler_slopes( + std::vector & k_previous, double factor, std::vector & k_buffer ) +{ + // h is the timestep + auto neighbour_buffer = std::vector(); + size_t j_index = 0; - // Update the agents from the copy - for( std::size_t i = 0; i < agents.size(); i++ ) + k_buffer.resize( network.n_agents() ); + + for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent ) { - max_opinion_diff - = std::max( max_opinion_diff, std::abs( agents[i].data.opinion - agents_current_copy[i].data.opinion ) ); - agents[i] = agents_current_copy[i]; + network.get_neighbours( idx_agent, neighbour_buffer ); // Get the incoming neighbours + k_buffer[idx_agent] = -( agents[idx_agent].data.opinion + factor * k_previous[idx_agent] ); + // Loop through neighbouring agents + for( size_t j = 0; j < neighbour_buffer.size(); j++ ) + { + j_index = neighbour_buffer[j]; + k_buffer[idx_agent] += K * ( std::tanh( alpha * ( agents[j_index].data.opinion + factor * k_previous[j_index] ) ) ); + } + // Multiply by the timestep + 
k_buffer[idx_agent] *= dt; } } \ No newline at end of file diff --git a/src/network.cpp b/src/network.cpp index 15b5f5e..d693540 100644 --- a/src/network.cpp +++ b/src/network.cpp @@ -48,12 +48,10 @@ void Seldon::Network::set_neighbours_and_weights( std::size_t agent_idx, const std::vector & buffer_neighbours, const std::vector & buffer_weights ) { - if( buffer_neighbours.size() != buffer_weights.size() ) - [[unlikely]] - { - throw std::runtime_error( - "Network::set_neighbours_and_weights: both buffers need to have the same length!" ); - } + if( buffer_neighbours.size() != buffer_weights.size() ) [[unlikely]] + { + throw std::runtime_error( "Network::set_neighbours_and_weights: both buffers need to have the same length!" ); + } neighbour_list[agent_idx] = buffer_neighbours; weight_list[agent_idx] = buffer_weights; From 94c1c16b60e94543e92d51746cb60f5ba3da3c3f Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Sun, 15 Oct 2023 22:20:52 +0000 Subject: [PATCH 28/32] slight refactoring --- include/models/ActivityDrivenModel.hpp | 33 ++++++++++-- src/models/ActivityDrivenModel.cpp | 71 ++++---------------------- src/network.cpp | 10 ++-- 3 files changed, 45 insertions(+), 69 deletions(-) diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp index cfa7d25..6518fa3 100644 --- a/include/models/ActivityDrivenModel.hpp +++ b/include/models/ActivityDrivenModel.hpp @@ -30,7 +30,7 @@ inline void Agent::from_string( const std::string & str ) { auto pos_comma = str.find_first_of( ',' ); data.opinion = std::stod( str.substr( 0, pos_comma ) ); - data.activity = std::stod( str.substr( pos_comma+1, str.size() ) ); + data.activity = std::stod( str.substr( pos_comma + 1, str.size() ) ); }; class ActivityAgentModel : public Model> @@ -45,8 +45,35 @@ class ActivityAgentModel : public Model> std::mt19937 & gen; // reference to simulation Mersenne-Twister engine std::set> reciprocal_edge_buffer{}; - void get_euler_slopes( std::vector & 
k_previous, double factor, std::vector & k_buffer ); - void get_euler_slopes( std::vector & k_buffer ); + // Buffers for RK4 integration + std::vector k1_buffer{}; + std::vector k2_buffer{}; + std::vector k3_buffer{}; + std::vector k4_buffer{}; + + template + void get_euler_slopes( std::vector & k_buffer, Opinion_Callback opinion ) + { + // h is the timestep + auto neighbour_buffer = std::vector(); + size_t j_index = 0; + + k_buffer.resize( network.n_agents() ); + + for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent ) + { + network.get_neighbours( idx_agent, neighbour_buffer ); // Get the incoming neighbours + k_buffer[idx_agent] = -opinion( idx_agent ); + // Loop through neighbouring agents + for( size_t j = 0; j < neighbour_buffer.size(); j++ ) + { + j_index = neighbour_buffer[j]; + k_buffer[idx_agent] += K * std::tanh( alpha * opinion( j_index ) ); + } + // Multiply by the timestep + k_buffer[idx_agent] *= dt; + } + } public: // Model-specific parameters diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index b1ea650..2cbd95d 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -9,7 +9,8 @@ Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, network( network ), agents_current_copy( std::vector( n_agents ) ), gen( gen ) -{ } +{ +} void Seldon::ActivityAgentModel::get_agents_from_power_law() { @@ -46,8 +47,7 @@ void Seldon::ActivityAgentModel::iteration() // Implement the weight for the probability of agent `idx_agent` contacting agent `j` // Not normalised since this is taken care of by the reservoir sampling - auto weight_callback = [idx_agent, this]( size_t j ) - { + auto weight_callback = [idx_agent, this]( size_t j ) { if( idx_agent == j ) // The agent does not contact itself return 0.0; return std::pow( @@ -94,22 +94,17 @@ void Seldon::ActivityAgentModel::iteration() network.transpose(); // transpose the network, so that we have incoming 
edges // Integrate the ODE using 4th order Runge-Kutta - auto k1_buffer = std::vector(); - auto k2_buffer = std::vector(); - auto k3_buffer = std::vector(); - auto k4_buffer = std::vector(); // k_1 = hf(x_n,y_n) - get_euler_slopes( k1_buffer ); - + get_euler_slopes( k1_buffer, [this]( size_t i ) { return this->agents[i].data.opinion; } ); // k_2 = hf(x_n+1/2h,y_n+1/2k_1) - get_euler_slopes( k1_buffer, 0.5, k2_buffer ); - + get_euler_slopes( + k2_buffer, [this]( size_t i ) { return this->agents[i].data.opinion + 0.5 * this->k1_buffer[i]; } ); // k_3 = hf(x_n+1/2h,y_n+1/2k_2) - get_euler_slopes( k2_buffer, 0.5, k3_buffer ); - + get_euler_slopes( + k3_buffer, [this]( size_t i ) { return this->agents[i].data.opinion + 0.5 * this->k2_buffer[i]; } ); // k_4 = hf(x_n+h,y_n+k_3) - get_euler_slopes( k3_buffer, 1.0, k4_buffer ); + get_euler_slopes( k4_buffer, [this]( size_t i ) { return this->agents[i].data.opinion + this->k3_buffer[i]; } ); // Update the agent opinions for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent ) @@ -119,52 +114,4 @@ void Seldon::ActivityAgentModel::iteration() += ( k1_buffer[idx_agent] + 2 * k2_buffer[idx_agent] + 2 * k3_buffer[idx_agent] + k4_buffer[idx_agent] ) / 6.0; } -} - -void Seldon::ActivityAgentModel::get_euler_slopes( std::vector & k_buffer ) -{ - // k_1 = hf(x_n,y_n) - // h is the timestep - auto neighbour_buffer = std::vector(); - size_t j_index = 0; - - k_buffer.resize( network.n_agents() ); - - for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent ) - { - network.get_neighbours( idx_agent, neighbour_buffer ); // Get the incoming neighbours - k_buffer[idx_agent] = -agents[idx_agent].data.opinion; - // Loop through neighbouring agents - for( size_t j = 0; j < neighbour_buffer.size(); j++ ) - { - j_index = neighbour_buffer[j]; - k_buffer[idx_agent] += K * ( std::tanh( alpha * agents[j_index].data.opinion ) ); - } - // Multiply by the timestep - k_buffer[idx_agent] *= dt; - } -} - -void 
Seldon::ActivityAgentModel::get_euler_slopes( - std::vector & k_previous, double factor, std::vector & k_buffer ) -{ - // h is the timestep - auto neighbour_buffer = std::vector(); - size_t j_index = 0; - - k_buffer.resize( network.n_agents() ); - - for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent ) - { - network.get_neighbours( idx_agent, neighbour_buffer ); // Get the incoming neighbours - k_buffer[idx_agent] = -( agents[idx_agent].data.opinion + factor * k_previous[idx_agent] ); - // Loop through neighbouring agents - for( size_t j = 0; j < neighbour_buffer.size(); j++ ) - { - j_index = neighbour_buffer[j]; - k_buffer[idx_agent] += K * ( std::tanh( alpha * ( agents[j_index].data.opinion + factor * k_previous[j_index] ) ) ); - } - // Multiply by the timestep - k_buffer[idx_agent] *= dt; - } } \ No newline at end of file diff --git a/src/network.cpp b/src/network.cpp index d693540..15b5f5e 100644 --- a/src/network.cpp +++ b/src/network.cpp @@ -48,10 +48,12 @@ void Seldon::Network::set_neighbours_and_weights( std::size_t agent_idx, const std::vector & buffer_neighbours, const std::vector & buffer_weights ) { - if( buffer_neighbours.size() != buffer_weights.size() ) [[unlikely]] - { - throw std::runtime_error( "Network::set_neighbours_and_weights: both buffers need to have the same length!" ); - } + if( buffer_neighbours.size() != buffer_weights.size() ) + [[unlikely]] + { + throw std::runtime_error( + "Network::set_neighbours_and_weights: both buffers need to have the same length!" 
); + } neighbour_list[agent_idx] = buffer_neighbours; weight_list[agent_idx] = buffer_weights; From 9080cf9fcaf86f1b2b08a158d9a4993618db81ad Mon Sep 17 00:00:00 2001 From: Amrita Goswami Date: Mon, 16 Oct 2023 20:35:50 +0000 Subject: [PATCH 29/32] Unit tests: Now we use catch2 inbuilt matchers --- test/test_network.cpp | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/test/test_network.cpp b/test/test_network.cpp index 839ee8d..a067822 100644 --- a/test/test_network.cpp +++ b/test/test_network.cpp @@ -1,6 +1,7 @@ #include "network.hpp" #include "network_generation.hpp" #include +#include #include #include #include @@ -28,13 +29,9 @@ TEST_CASE( "Testing the network class" ) std::vector buffer_w_get{}; network->get_neighbours( 3, buffer_n_get ); network->get_weights( 3, buffer_w_get ); - for( size_t i = 0; i < buffer_n_get.size(); i++ ) - { - // CAREFUL: this assumes that the order of edges has been preserved - // which at the moment seems reasonable ... but you never know - REQUIRE( buffer_n[i] == buffer_n_get[i] ); - REQUIRE( buffer_w[i] == buffer_w_get[i] ); - } + + REQUIRE_THAT(buffer_n_get, Catch::Matchers::UnorderedRangeEquals(buffer_n)); + REQUIRE_THAT(buffer_w_get, Catch::Matchers::UnorderedRangeEquals(buffer_w)); // Now we test the transpose() function @@ -69,7 +66,6 @@ TEST_CASE( "Testing the network class" ) std::tuple edge{ neighbour, i_agent, weight }; // Note that i_agent and neighbour are flipped compared to before - REQUIRE( old_edges.contains( edge ) ); // can we find the transposed edge? 
old_edges.extract( edge ); // extract the edge afterwards } From 8f43cbc860643c8f03763d9410be148cf977ca40 Mon Sep 17 00:00:00 2001 From: Amrita Goswami Date: Mon, 16 Oct 2023 21:04:54 +0000 Subject: [PATCH 30/32] Testing: jazzing up unit tests --- src/models/ActivityDrivenModel.cpp | 3 ++- src/network.cpp | 10 ++++------ test/test_deGroot.cpp | 4 ++-- test/test_network.cpp | 6 +++--- test/test_tarjan.cpp | 5 +++-- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index 2cbd95d..eb37181 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -47,7 +47,8 @@ void Seldon::ActivityAgentModel::iteration() // Implement the weight for the probability of agent `idx_agent` contacting agent `j` // Not normalised since this is taken care of by the reservoir sampling - auto weight_callback = [idx_agent, this]( size_t j ) { + auto weight_callback = [idx_agent, this]( size_t j ) + { if( idx_agent == j ) // The agent does not contact itself return 0.0; return std::pow( diff --git a/src/network.cpp b/src/network.cpp index 15b5f5e..d693540 100644 --- a/src/network.cpp +++ b/src/network.cpp @@ -48,12 +48,10 @@ void Seldon::Network::set_neighbours_and_weights( std::size_t agent_idx, const std::vector & buffer_neighbours, const std::vector & buffer_weights ) { - if( buffer_neighbours.size() != buffer_weights.size() ) - [[unlikely]] - { - throw std::runtime_error( - "Network::set_neighbours_and_weights: both buffers need to have the same length!" ); - } + if( buffer_neighbours.size() != buffer_weights.size() ) [[unlikely]] + { + throw std::runtime_error( "Network::set_neighbours_and_weights: both buffers need to have the same length!" 
); + } neighbour_list[agent_idx] = buffer_neighbours; weight_list[agent_idx] = buffer_weights; diff --git a/test/test_deGroot.cpp b/test/test_deGroot.cpp index 3284945..781d70c 100644 --- a/test/test_deGroot.cpp +++ b/test/test_deGroot.cpp @@ -34,10 +34,10 @@ TEST_CASE( "Test the DeGroot Model Symmetric", "[DeGroot]" ) model.iteration(); } while( !model.finished() ); - fmt::print( "N_iterations = {} (with convergence_tol {})\n", model.n_iterations, model.convergence_tol ); + INFO( fmt::format( "N_iterations = {} (with convergence_tol {})\n", model.n_iterations, model.convergence_tol ) ); for( size_t i = 0; i < n_agents; i++ ) { - fmt::print( "Opinion {} = {}\n", i, model.agents[i].data ); + INFO( fmt::format( "Opinion {} = {}\n", i, model.agents[i].data ) ); REQUIRE_THAT( model.agents[i].data, WithinAbs( 0.5, model.convergence_tol * 10.0 ) ); } } \ No newline at end of file diff --git a/test/test_network.cpp b/test/test_network.cpp index a067822..47a0076 100644 --- a/test/test_network.cpp +++ b/test/test_network.cpp @@ -30,8 +30,8 @@ TEST_CASE( "Testing the network class" ) network->get_neighbours( 3, buffer_n_get ); network->get_weights( 3, buffer_w_get ); - REQUIRE_THAT(buffer_n_get, Catch::Matchers::UnorderedRangeEquals(buffer_n)); - REQUIRE_THAT(buffer_w_get, Catch::Matchers::UnorderedRangeEquals(buffer_w)); + REQUIRE_THAT( buffer_n_get, Catch::Matchers::UnorderedRangeEquals( buffer_n ) ); + REQUIRE_THAT( buffer_w_get, Catch::Matchers::UnorderedRangeEquals( buffer_w ) ); // Now we test the transpose() function @@ -65,7 +65,7 @@ TEST_CASE( "Testing the network class" ) auto weight = buffer_w[i_neighbour]; std::tuple edge{ neighbour, i_agent, weight - }; // Note that i_agent and neighbour are flipped compared to before + }; // Note that i_agent and neighbour are flipped compared to before REQUIRE( old_edges.contains( edge ) ); // can we find the transposed edge? 
old_edges.extract( edge ); // extract the edge afterwards } diff --git a/test/test_tarjan.cpp b/test/test_tarjan.cpp index c7fb551..5938932 100644 --- a/test/test_tarjan.cpp +++ b/test/test_tarjan.cpp @@ -1,4 +1,5 @@ #include +#include #include "connectivity.hpp" #include @@ -49,7 +50,7 @@ TEST_CASE( "Test for Tarjan's algorithm for strongly connected networks", "[tarj // List of SCC // [[5, 4], [3], [2, 1, 0], [9, 8, 7, 6]] - fmt::print( "SCC = {}\n", tarjan_scc.scc_list ); + INFO( fmt::format( "SCC = {}\n", tarjan_scc.scc_list ) ); std::set> expected_scc{ { 5, 4 }, { 3 }, { 2, 1, 0 }, { 9, 8, 7, 6 } }; @@ -61,5 +62,5 @@ TEST_CASE( "Test for Tarjan's algorithm for strongly connected networks", "[tarj } // There should be 4 strongly connected components - REQUIRE( tarjan_scc.scc_list.size() == 4 ); + REQUIRE_THAT( tarjan_scc.scc_list, Catch::Matchers::SizeIs( 4 ) ); } \ No newline at end of file From 9bde5b53fba7c47a892f23a4a2523fa2a3e8738d Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Mon, 16 Oct 2023 21:23:04 +0000 Subject: [PATCH 31/32] tests: Added a test for some network functions --- test/test_network.cpp | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/test/test_network.cpp b/test/test_network.cpp index 47a0076..36f9fc8 100644 --- a/test/test_network.cpp +++ b/test/test_network.cpp @@ -19,23 +19,41 @@ TEST_CASE( "Testing the network class" ) // Does n_agents work? 
REQUIRE( network->n_agents() == n_agents ); + // Check that the function for setting neighbours and a single weight work + // Agent 3 + std::vector buffer_n_get{}; // buffer for getting neighbours + std::vector buffer_w_get{}; // buffer for getting the weights + std::vector neigh{ { 0, 10 } }; // new neighbours + std::vector weight{ 0.5, 0.5 }; // new weights (const) + network->set_neighbours_and_weights( 3, neigh, 0.5 ); + network->get_weights( 3, buffer_w_get ); + REQUIRE_THAT( buffer_w_get, Catch::Matchers::UnorderedRangeEquals( weight ) ); + // Change the connections for agent 3 - std::vector buffer_n{ { 0, 10, 15 } }; - std::vector buffer_w{ 0.1, 0.2, 0.3 }; + std::vector buffer_n{ { 0, 10, 15 } }; // new neighbours + std::vector buffer_w{ 0.1, 0.2, 0.3 }; // new weights network->set_neighbours_and_weights( 3, buffer_n, buffer_w ); // Make sure the changes worked - std::vector buffer_n_get{}; - std::vector buffer_w_get{}; network->get_neighbours( 3, buffer_n_get ); network->get_weights( 3, buffer_w_get ); REQUIRE_THAT( buffer_n_get, Catch::Matchers::UnorderedRangeEquals( buffer_n ) ); REQUIRE_THAT( buffer_w_get, Catch::Matchers::UnorderedRangeEquals( buffer_w ) ); + // Check that the push_back function works for agent 3 + buffer_n.push_back( 5 ); // new neighbour + buffer_w.push_back( 1.0 ); // new weight for this new connection + network->push_back_neighbour_and_weight( 3, 5, 1.0 ); // new connection added with weight + // Check that the change worked for the push_back function + network->get_neighbours( 3, buffer_n_get ); + network->get_weights( 3, buffer_w_get ); + REQUIRE_THAT( buffer_n_get, Catch::Matchers::UnorderedRangeEquals( buffer_n ) ); + REQUIRE_THAT( buffer_w_get, Catch::Matchers::UnorderedRangeEquals( buffer_w ) ); + // Now we test the transpose() function - // First record all the old edges as tupels (i,j,w) where this edge goes from j -> i with weight w + // First record all the old edges as tuples (i,j,w) where this edge goes from j -> i
with weight w std::set> old_edges; for( size_t i_agent = 0; i_agent < network->n_agents(); i_agent++ ) { From 0db57f474c4490f6f829050a88daa36ab44154a4 Mon Sep 17 00:00:00 2001 From: Moritz Sallermann Date: Mon, 16 Oct 2023 21:33:09 +0000 Subject: [PATCH 32/32] unit test for some of the sampling functions --- meson.build | 3 +- test/test_sampling.cpp | 128 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 130 insertions(+), 1 deletion(-) create mode 100644 test/test_sampling.cpp diff --git a/meson.build b/meson.build index 058cf0b..cd58a5b 100644 --- a/meson.build +++ b/meson.build @@ -24,7 +24,8 @@ exe = executable('seldon', sources_seldon + 'src/main.cpp', tests = [ ['Test Tarjan', 'test/test_tarjan.cpp'], ['Test DeGroot', 'test/test_deGroot.cpp'], - ['Test Network', 'test/test_network.cpp'] + ['Test Network', 'test/test_network.cpp'], + ['Test Sampling', 'test/test_sampling.cpp'], ] Catch2 = dependency('Catch2', method : 'cmake', modules : ['Catch2::Catch2WithMain', 'Catch2::Catch2']) diff --git a/test/test_sampling.cpp b/test/test_sampling.cpp new file mode 100644 index 0000000..3bc64d6 --- /dev/null +++ b/test/test_sampling.cpp @@ -0,0 +1,128 @@ +#include "util/math.hpp" +#include +#include +#include +#include +#include +#include +#include +#include + +double compute_p( size_t k, size_t n ) +{ + if( k == 0 ) + { + return 0.0; + } + else + { + double p = 1.0 / ( double( n ) - 1.0 ); + return p + ( 1.0 - p ) * compute_p( k - 1, n - 1 ); + } +} + +TEST_CASE( "Testing sampling functions" ) +{ + std::random_device rd; + std::mt19937 gen( rd() ); + + SECTION( "draw_unique_k_from_n", "Drawing k numbers out of n" ) + { + + const size_t N_RUNS = 10000; + + const size_t k = 6; + const size_t n = 100; + const size_t ignore_idx = 11; + + std::vector histogram( n, 0 ); // Count how often each element occurs amongst all samples + + std::vector buffer{}; + for( size_t i = 0; i < N_RUNS; i++ ) + { + Seldon::draw_unique_k_from_n( ignore_idx, k, n, buffer, gen ); + for( 
const auto & n : buffer ) + { + histogram[n]++; + } + } + + // In each run there is a probability of p for each element to be selected + // That means for each histogram bin we have a binomial distribution with p + double p = compute_p( k, n ); + + size_t mean = N_RUNS * p; + // The variance of a binomial distribution is var = n*p*(1-p) + size_t sigma = std::sqrt( N_RUNS * p * ( 1.0 - p ) ); + + INFO( "Binomial distribution parameters" ); + INFO( fmt::format( " p = {}", p ) ); + INFO( fmt::format( " mean = {}", mean ) ); + INFO( fmt::format( " sigma = {}", sigma ) ); + + REQUIRE( histogram[ignore_idx] == 0 ); // The ignore_idx should never be selected + + size_t number_outside_three_sigma = 0; + for( const auto & n : histogram ) + { + if( n == 0 ) + { + continue; + } + INFO( fmt::format( " n = {}", n ) ); + INFO( fmt::format( " mean = {}", mean ) ); + INFO( fmt::format( " sigma = {}", sigma ) ); + + if( std::abs( double( n ) - double( mean ) ) > 3.0 * sigma ) + { + number_outside_three_sigma++; + } + + REQUIRE_THAT( n, Catch::Matchers::WithinAbs( mean, 5 * sigma ) ); + } + + if( number_outside_three_sigma > 0.01 * N_RUNS ) + WARN( fmt::format( + "Many deviations beyond the 3 sigma range. 
{} out of {}", number_outside_three_sigma, N_RUNS ) ); + } + + SECTION( "weighted_reservoir_sampling", "Testing weighted reservoir sampling with A_ExpJ algorithm" ) + { + + const size_t N_RUNS = 10000; + + const size_t k = 6; + const size_t n = 100; + const size_t ignore_idx = 11; + const size_t ignore_idx2 = 29; + + std::vector histogram( n, 0 ); // Count how often each element occurs amongst all samples + + auto weight_callback = []( size_t idx ) { + if( ( idx == ignore_idx ) || ( idx == ignore_idx2 ) ) + { + return 0.0; + } + else + { + return std::abs( double( n / 2.0 ) - double( idx ) ); + } + }; + + std::vector buffer{}; + + for( size_t i = 0; i < N_RUNS; i++ ) + { + Seldon::reservoir_sampling_A_ExpJ( k, n, weight_callback, buffer, gen ); + for( const auto & n : buffer ) + { + histogram[n]++; + } + } + + REQUIRE( histogram[ignore_idx] == 0 ); // The ignore_idx should never be selected + REQUIRE( histogram[ignore_idx2] == 0 ); // The ignore_idx2 should never be selected + + // TODO: histogram and sigma test + } +} \ No newline at end of file