Skip to content

Commit

Permalink
Merge pull request #12 from seldon-code/activity-driven-model
Browse files Browse the repository at this point in the history
Activity driven model
  • Loading branch information
MSallermann authored Oct 18, 2023
2 parents 9d3a7bb + 0db57f4 commit e91a86b
Show file tree
Hide file tree
Showing 19 changed files with 724 additions and 106 deletions.
20 changes: 20 additions & 0 deletions examples/ActivityDriven/conf.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Example configuration for the ActivityDriven model.
# Each [section] is read by the corresponding part of the simulation setup.
[simulation]
model = "ActivityDriven"
# rng_seed = 120 # Leaving this empty will pick a random seed

[model]
max_iterations = 20 # If not set, max iterations is infinite

[ActivityDriven]
dt = 0.01 # Timestep for the integration of the coupled ODEs
m = 10 # Number of agents contacted, when the agent is active
eps = 0.01 # Minimum activity epsilon; a_i belongs to [epsilon,1]
gamma = 2.1 # Exponent of activity power law distribution of activities
reciprocity = 0.5 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections
homophily = 0.5 # aka beta. if zero, agents pick their interaction partners at random
alpha = 3.0 # Controversialness of the issue, must be greater than 0.
K = 3.0 # Social interaction strength

[network]
number_of_agents = 1000
connections_per_agent = 10
14 changes: 8 additions & 6 deletions include/agent.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,28 +4,30 @@
namespace Seldon
{

/* Agent<T> is a base class for actual agents with data type T
   (which contains an opinion and perhaps some other things).
   Concrete instantiations need to implement to_string and from_string. */
template<typename T>
class Agent : public AgentBase
{
public:
    using data_t = T;
    data_t data; // Model-specific agent state

    Agent() = default;
    // Take by const reference: the original by-value parameter incurred two
    // copies of T (into the parameter, then into the member).
    Agent( const data_t & data ) : data( data ) {}

    // Parse the agent state from its serialized string representation.
    void from_string( const std::string & str );

    // Serialize the agent state (specializations may format differently).
    std::string to_string() const override
    {
        return fmt::format( "{}", data );
    }
};

// A double-valued agent is serialized as a single number, so parsing
// is just a string-to-double conversion.
template<>
inline void Agent<double>::from_string( const std::string & str )
{
    const double parsed_value = std::stod( str );
    data = parsed_value;
}

} // namespace Seldon
15 changes: 6 additions & 9 deletions include/model.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class Model : public ModelBase
Model( size_t n_agents ) : agents( std::vector<AgentT>( int( n_agents ), AgentT() ) ) {}
Model( std::vector<AgentT> && agents ) : agents( agents ) {}

void Agents_from_File( const std::string & file )
void agents_from_file( const std::string & file ) override
{
agents.clear();

Expand All @@ -39,7 +39,7 @@ class Model : public ModelBase
auto line = file_contents.substr( start_of_line, end_of_line - start_of_line );
start_of_line = end_of_line + 1;
// TODO: check if empty or comment
if( line.size() == 0 )
if( line.empty() )
{
break;
}
Expand All @@ -57,7 +57,10 @@ class Model : public ModelBase
}
}

void iteration() override = 0;
// Perform one iteration: the base implementation only advances the
// iteration counter; concrete models are expected to override this.
void iteration() override
{
    ++n_iterations;
}

bool finished() override
{
Expand All @@ -77,10 +80,4 @@ class Model : public ModelBase
}
};

template<typename AgentT_>
void Seldon::Model<AgentT_>::iteration()
{
n_iterations++;
};

} // namespace Seldon
3 changes: 2 additions & 1 deletion include/model_base.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@ class ModelBase
// Returns a type-erased view of the agent stored at index idx.
virtual AgentBase * get_agent( int idx ) = 0;
// Advances the model by one simulation step.
virtual void iteration() = 0;
// True once the model's termination criterion is met.
virtual bool finished() = 0;
// Replaces the current agents with those parsed from the given file.
virtual void agents_from_file( const std::string & file ) = 0;
// Virtual destructor: models are deleted through ModelBase pointers.
virtual ~ModelBase() = default;
};

} // namespace Seldon
104 changes: 104 additions & 0 deletions include/models/ActivityDrivenModel.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
#pragma once

#include "agent.hpp"
#include "model.hpp"
#include "network.hpp"
#include <cmath>
#include <cstddef>
#include <random>
#include <set>
#include <stdexcept>
#include <utility>
#include <vector>

namespace Seldon
{

// Per-agent state evolved by the activity-driven model.
struct ActivityAgentData
{
    double opinion = 0; // x_i: the agent's current opinion
    double activity = 0; // a_i: activity drawn from the power-law distribution (see get_agents_from_power_law)
};

// Serialize as "<opinion>, <activity>" — the format consumed by from_string.
template<>
inline std::string Agent<ActivityAgentData>::to_string() const
{
    return fmt::format( "{}, {}", data.opinion, data.activity );
};

// Parse "<opinion>, <activity>" (the format produced by to_string).
// Throws std::invalid_argument when no comma separator is present;
// the original code silently parsed the whole string as both fields.
template<>
inline void Agent<ActivityAgentData>::from_string( const std::string & str )
{
    auto pos_comma = str.find_first_of( ',' );
    if( pos_comma == std::string::npos )
    {
        throw std::invalid_argument( "ActivityAgentData: expected \"opinion, activity\", got: " + str );
    }
    data.opinion  = std::stod( str.substr( 0, pos_comma ) );
    data.activity = std::stod( str.substr( pos_comma + 1 ) );
}

/* Opinion dynamics with activity-driven agents: each agent carries an
   opinion x_i and an activity a_i (see ActivityAgentData). Opinions are
   integrated with RK4 using slopes
       k_i = dt * ( -x_i + K * sum_j tanh( alpha * x_j ) ),
   where the sum runs over the incoming neighbours of agent i. */
class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
{
    using AgentT = Agent<ActivityAgentData>;

private:
    // NOTE(review): presumably the largest |x_i(t+dt) - x_i(t)| of the last
    // step, compared against convergence_tol — confirm in the .cpp
    double max_opinion_diff = 0;
    Network & network;
    std::vector<AgentT> agents_current_copy; // Scratch copy of the agents for the update step
    // Random number generation
    std::mt19937 & gen; // reference to simulation Mersenne-Twister engine
    // Edge pairs collected while adding reciprocal connections (see reciprocity)
    std::set<std::pair<size_t, size_t>> reciprocal_edge_buffer{};

    // Buffers for RK4 integration
    std::vector<double> k1_buffer{};
    std::vector<double> k2_buffer{};
    std::vector<double> k3_buffer{};
    std::vector<double> k4_buffer{};

    /* Fills k_buffer with one set of RK4 slopes
           k_i = dt * ( -opinion(i) + K * sum_j tanh( alpha * opinion(j) ) ),
       where `opinion` is a callback returning the opinion value to evaluate at
       and j runs over the incoming neighbours of agent i. */
    template<typename Opinion_Callback>
    void get_euler_slopes( std::vector<double> & k_buffer, Opinion_Callback opinion )
    {
        auto neighbour_buffer = std::vector<size_t>();
        size_t j_index = 0;

        k_buffer.resize( network.n_agents() );

        for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent )
        {
            network.get_neighbours( idx_agent, neighbour_buffer ); // Get the incoming neighbours
            k_buffer[idx_agent] = -opinion( idx_agent );
            // Contribution of the neighbouring agents
            for( size_t j = 0; j < neighbour_buffer.size(); j++ )
            {
                j_index = neighbour_buffer[j];
                k_buffer[idx_agent] += K * std::tanh( alpha * opinion( j_index ) );
            }
            // Multiply by the timestep
            k_buffer[idx_agent] *= dt;
        }
    }

public:
    // Model-specific parameters
    double dt = 0.01; // Timestep for the integration of the coupled ODEs
    // Various free parameters
    int m = 10; // Number of agents contacted, when the agent is active
    double eps = 0.01; // Minimum activity epsilon; a_i belongs to [epsilon,1]
    double gamma = 2.1; // Exponent of activity power law distribution of activities
    double alpha = 3.0; // Controversialness of the issue, must be greater than 0.
    double homophily = 0.5; // aka beta. if zero, agents pick their interaction partners at random
    // Reciprocity aka r. probability that when agent i contacts j via weighted reservoir sampling
    // j also sends feedback to i. So every agent can have more than m incoming connections
    double reciprocity = 0.5;
    double K = 3.0; // Social interaction strength; K>0

    double convergence_tol = 1e-12; // NOTE(review): convergence threshold — exact semantics TODO, confirm in the .cpp

    ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen );

    // Draws every agent's activity from the power-law distribution;
    // this needs to be called after eps and gamma have been set.
    void get_agents_from_power_law();

    void iteration() override;
};

} // namespace Seldon
12 changes: 12 additions & 0 deletions include/network.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,21 @@ class Network
Network( std::vector<std::vector<size_t>> && neighbour_list, std::vector<std::vector<WeightT>> && weight_list );

// Number of agents (nodes) in the network.
std::size_t n_agents() const;

// Copies the neighbour indices / connection weights of agent_idx into buffer.
void get_neighbours( std::size_t agent_idx, std::vector<size_t> & buffer ) const;
void get_weights( std::size_t agent_idx, std::vector<WeightT> & buffer ) const;

// Replaces the connections of agent_idx; every new connection gets the same weight.
void set_neighbours_and_weights(
std::size_t agent_idx, const std::vector<size_t> & buffer_neighbours, const WeightT & weight );

// Replaces the connections of agent_idx with per-connection weights
// (buffer_weights must match buffer_neighbours in length — confirm in network.cpp).
void set_neighbours_and_weights(
std::size_t agent_idx, const std::vector<size_t> & buffer_neighbours,
const std::vector<WeightT> & buffer_weights );

// Appends a single connection between agents i and j with weight w.
void push_back_neighbour_and_weight( size_t i, size_t j, WeightT w );

// Transposes the adjacency structure — NOTE(review): presumably swaps
// incoming/outgoing connections; confirm direction convention in network.cpp.
void transpose();

private:
std::vector<std::vector<size_t>> neighbour_list; // Neighbour list for the connections
std::vector<std::vector<WeightT>> weight_list; // List for the interaction weights of each connection
Expand Down
1 change: 0 additions & 1 deletion include/util/io.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ namespace Seldon
namespace IO
{


inline void network_to_dot_file( const Network & network, const std::string & file_path )
{
std::fstream fs;
Expand Down
132 changes: 132 additions & 0 deletions include/util/math.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iterator>
#include <queue>
#include <random>
#include <utility>
#include <vector>

namespace Seldon
{

// Fills `buffer` with k distinct indices drawn uniformly from {0, ..., n-1} \ {ignore_idx}.
// ignore_idx is excluded because the caller later adds the agent itself,
// which would otherwise create a duplicate connection.
inline void draw_unique_k_from_n(
    std::size_t ignore_idx, std::size_t k, std::size_t n, std::vector<std::size_t> & buffer, std::mt19937 & gen )
{
    struct SequenceGenerator
    {
        /* A forward iterator over the integers 0, 1, 2, ... that skips ignore_idx */
        using iterator_category = std::forward_iterator_tag;
        using difference_type   = std::ptrdiff_t;
        using value_type        = size_t;
        using pointer           = size_t *;
        using reference         = size_t &;

        // LegacyForwardIterator requires default-constructibility
        SequenceGenerator() = default;
        SequenceGenerator( const size_t i_, const size_t ignore_idx_ ) : i( i_ ), ignore_idx( ignore_idx_ )
        {
            if( i == ignore_idx )
            {
                i++;
            }
        }
        size_t i          = 0;
        size_t ignore_idx = 0;

        size_t & operator*()
        {
            return i;
        }
        bool operator==( const SequenceGenerator & other ) const
        {
            return i == other.i;
        }
        // Required by std::distance/std::sample in C++17: operator!= is not
        // auto-generated from operator== before C++20.
        bool operator!=( const SequenceGenerator & other ) const
        {
            return i != other.i;
        }
        SequenceGenerator & operator++()
        {
            i++;
            if( i == ignore_idx )
                i++;
            return *this;
        }
    };

    buffer.resize( k );
    auto end_of_sample
        = std::sample( SequenceGenerator( 0, ignore_idx ), SequenceGenerator( n, ignore_idx ), buffer.begin(), k, gen );
    // If fewer than k candidates exist, shrink to the number actually written
    buffer.resize( std::distance( buffer.begin(), end_of_sample ) );
}

template<typename WeightCallbackT>
void reservoir_sampling_A_ExpJ(
size_t k, size_t n, WeightCallbackT weight, std::vector<std::size_t> & buffer, std::mt19937 & mt )
{
std::uniform_real_distribution<double> distribution( 0.0, 1.0 );

std::vector<size_t> reservoir( k );
using QueueItemT = std::pair<size_t, double>;

auto compare = []( const QueueItemT & item1, const QueueItemT & item2 ) { return item1.second > item2.second; };
std::priority_queue<QueueItemT, std::vector<QueueItemT>, decltype( compare )> H;

size_t idx = 0;
while( idx < n & H.size() < k )
{
double r = std::pow( distribution( mt ), 1.0 / weight( idx ) );
H.push( { idx, r } );
idx++;
}

auto X = std::log( distribution( mt ) ) / std::log( H.top().second );
while( idx < n )
{
auto w = weight( idx );
X -= w;
if( X <= 0 )
{
auto t = std::pow( H.top().second, w );
auto uniform_from_t_to_one = distribution( mt ) * ( 1.0 - t ) + t; // Random number in interval [t, 1.0]
auto r = std::pow( uniform_from_t_to_one, 1.0 / w );
H.pop();
H.push( { idx, r } );
X = std::log( distribution( mt ) ) / std::log( H.top().second );
}
idx++;
}

buffer.resize( H.size() );

for( size_t i = 0; i < k; i++ )
{
buffer[i] = H.top().first;
H.pop();
}
}

/**
 * @brief Power law distribution for random numbers.
 * A continuous random distribution on the range [eps, 1]
 * (NOT [eps, infty): the inverse-CDF below maps u=0 to eps and u->1 to 1,
 * matching the configured activities a_i in [eps, 1])
 * with p(x) ~ x^(-gamma)
 * Including normalization the PDF is
 * p(x) = (1-gamma)/(1-eps^(1-gamma)) * x^(-gamma)
 */
template<typename ScalarT = double>
class power_law_distribution
{
private:
    ScalarT eps;
    ScalarT gamma;
    std::uniform_real_distribution<ScalarT> dist
        = std::uniform_real_distribution<ScalarT>( 0.0, 1.0 ); // Uniform random variable for activities

public:
    power_law_distribution( ScalarT eps, ScalarT gamma ) : eps( eps ), gamma( gamma ) {}

    template<typename Generator>
    ScalarT operator()( Generator & gen )
    {
        // Inverse transform sampling: x = ((1 - eps^(1-g)) u + eps^(1-g))^(1/(1-g)).
        // eps^(1-gamma) is computed once; ScalarT-typed constants avoid
        // accidental float -> double promotion when ScalarT is float.
        const ScalarT one     = ScalarT( 1 );
        const ScalarT eps_pow = std::pow( eps, one - gamma );
        return std::pow( ( one - eps_pow ) * dist( gen ) + eps_pow, one / ( one - gamma ) );
    }
};

} // namespace Seldon
Loading

0 comments on commit e91a86b

Please sign in to comment.