Merge pull request #15 from seldon-code/develop
Develop
MSallermann authored Mar 7, 2024
2 parents 2f7fb41 + 7c04f3d commit d9ca402
Showing 20 changed files with 565 additions and 101 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -1,5 +1,8 @@
*.txt

.cache/

output/
build/

# pixi
15 changes: 14 additions & 1 deletion README.md
@@ -22,7 +22,7 @@ micromamba create -f environment.yml
micromamba activate seldonenv
```

### Compilation
### Compilation and Installation

We use `meson` to compile and build Seldon.

@@ -31,6 +31,13 @@ meson setup build
meson compile -C build
```

To install `seldon` to your `conda` environment, run the following:

```bash
meson setup build --prefix $CONDA_PREFIX
meson install -C build
```

### Quick Start

Run the executable, and provide the input configuration TOML file (as the first positional argument).
@@ -41,6 +48,12 @@ cd build
./seldon /path/to/config -o /path/to/output/dir
```

If you've installed it, you can simply run `seldon` from anywhere:

```bash
seldon /path/to/config -o /path/to/output/dir
```

#### Output files
The file `network.txt` contains information about the network.
The first column is the index of the agent; the next column is the number of incoming connections, *including* the agent itself. Subsequent columns are the incoming neighbour indices, followed by their weights. In addition, every iteration produces a *double* opinion value for each agent; these are written to files named `opinions_i.txt`.
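For illustration, a single made-up row for agent 2 with three incoming connections might look like the following (the indices and weights are invented; the exact column spacing comes from the `fmt` format strings in `include/util/io.hpp`):

```
    2,     3,     2,     5,    17, 0.50, 0.25, 0.25
```

Here `2` is the agent index, `3` the number of incoming connections (including agent 2 itself), `2, 5, 17` the incoming neighbour indices, and the last three columns their weights.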
21 changes: 12 additions & 9 deletions examples/ActivityDriven/conf.toml
@@ -4,20 +4,23 @@ model = "ActivityDriven"

[io]
n_output_network = 20 # Write the network every 20 iterations
print_progress = true # Print the iteration time; if not set, progress is always printed

[model]
max_iterations = 500 # If not set, max iterations is infinite

[ActivityDriven]
dt = 0.01 # Timestep for the integration of the coupled ODEs
m = 10 # Number of agents contacted, when the agent is active
eps = 0.01 # Minimum activity epsilon; a_i belongs to [epsilon,1]
gamma = 2.1 # Exponent of activity power law distribution of activities
reciprocity = 0.5 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections
homophily = 0.5 # aka beta. if zero, agents pick their interaction partners at random
alpha = 3.0 # Controversialness of the issue, must be greater than 0.
K = 3.0 # Social interaction strength
dt = 0.01 # Timestep for the integration of the coupled ODEs
m = 10 # Number of agents contacted, when the agent is active
eps = 0.01 # Minimum activity epsilon; a_i belongs to [epsilon,1]
gamma = 2.1 # Exponent of activity power law distribution of activities
reciprocity = 0.5 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections
homophily = 0.5 # aka beta. if zero, agents pick their interaction partners at random
alpha = 3.0 # Controversialness of the issue, must be greater than 0.
K = 3.0 # Social interaction strength
mean_activities = false # Use the mean value of the powerlaw distribution for the activities of all agents
mean_weights = false # Use the meanfield approximation of the network edges

[network]
number_of_agents = 1000
connections_per_agent = 10
connections_per_agent = 10
26 changes: 26 additions & 0 deletions examples/ActivityDrivenMeanField/conf.toml
@@ -0,0 +1,26 @@
[simulation]
model = "ActivityDriven"
rng_seed = 12345678 # Leaving this empty will pick a random seed

[io]
n_output_network = 20 # Write the network every 20 iterations
print_progress = true # Print the iteration time; if not set, progress is always printed

[model]
max_iterations = 2000 # If not set, max iterations is infinite

[ActivityDriven]
dt = 0.01 # Timestep for the integration of the coupled ODEs
m = 10 # Number of agents contacted, when the agent is active
eps = 0.01 # Minimum activity epsilon; a_i belongs to [epsilon,1]
gamma = 2.1 # Exponent of activity power law distribution of activities
reciprocity = 0.5 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections
homophily = 0.0 # aka beta. if zero, agents pick their interaction partners at random
alpha = 3.0 # Controversialness of the issue, must be greater than 0.
K = 3.0 # Social interaction strength
mean_activities = false # Use the mean value of the powerlaw distribution for the activities of all agents
mean_weights = true # Use the meanfield approximation of the network edges

[network]
number_of_agents = 100
connections_per_agent = 10
3 changes: 3 additions & 0 deletions examples/DeGroot/conf.toml
@@ -2,6 +2,9 @@
model = "DeGroot"
# rng_seed = 120 # Leaving this empty will pick a random seed

[io]
print_progress = false # Print the iteration time; if not set, progress is always printed

[model]
max_iterations = 20 # If not set, max iterations is infinite

20 changes: 18 additions & 2 deletions include/models/ActivityDrivenModel.hpp
@@ -40,7 +40,7 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
private:
double max_opinion_diff = 0;
Network & network;
std::vector<AgentT> agents_current_copy;
std::vector<std::vector<Network::WeightT>> contact_prob_list; // Probability of choosing i in 1 to m rounds
// Random number generation
std::mt19937 & gen; // reference to simulation Mersenne-Twister engine
std::set<std::pair<size_t, size_t>> reciprocal_edge_buffer{};
@@ -56,25 +56,31 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
{
// h is the timestep
auto neighbour_buffer = std::vector<size_t>();
auto weight_buffer = std::vector<Network::WeightT>();
size_t j_index = 0;

k_buffer.resize( network.n_agents() );

for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent )
{
network.get_neighbours( idx_agent, neighbour_buffer ); // Get the incoming neighbours
network.get_weights( idx_agent, weight_buffer ); // Get incoming weights
k_buffer[idx_agent] = -opinion( idx_agent );
// Loop through neighbouring agents
for( size_t j = 0; j < neighbour_buffer.size(); j++ )
{
j_index = neighbour_buffer[j];
k_buffer[idx_agent] += K * std::tanh( alpha * opinion( j_index ) );
k_buffer[idx_agent] += K * weight_buffer[j] * std::tanh( alpha * opinion( j_index ) );
}
// Multiply by the timestep
k_buffer[idx_agent] *= dt;
}
}

void update_network_probabilistic();
void update_network_mean();
void update_network();

public:
// Model-specific parameters
double dt = 0.01; // Timestep for the integration of the coupled ODEs
Expand All @@ -89,8 +95,18 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
double reciprocity = 0.5;
double K = 3.0; // Social interaction strength; K>0

bool mean_activities = false;
bool mean_weights = false;

double convergence_tol = 1e-12; // TODO: ??

// bot @TODO: less hacky
bool bot_present = false;
size_t n_bots = 0; // The first n_bots agents are bots
std::vector<int> bot_m = std::vector<int>( 0 );
std::vector<double> bot_activity = std::vector<double>( 0 );
std::vector<double> bot_opinion = std::vector<double>( 0 );

ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen );

void get_agents_from_power_law(); // This needs to be called after eps and gamma have been set
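For context (an editorial reading of the diff above, not code from the commit): the updated slope computation now weights each incoming neighbour's influence. From the `K * weight_buffer[j] * std::tanh( alpha * opinion( j_index ) )` line, `k_buffer` accumulates one timestep of

$$ \dot{x}_i = -x_i + K \sum_{j} w_{ij} \tanh( \alpha\, x_j ), $$

where $x_i$ is agent $i$'s opinion, $w_{ij}$ the incoming edge weight, $K$ the social interaction strength, and $\alpha$ the controversialness.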
9 changes: 9 additions & 0 deletions include/network.hpp
@@ -16,8 +16,17 @@ class Network
std::size_t n_agents() const;

void get_neighbours( std::size_t agent_idx, std::vector<size_t> & buffer ) const;

void get_weights( std::size_t agent_idx, std::vector<WeightT> & buffer ) const;

std::size_t get_n_edges( std::size_t agent_idx ) const;

WeightT & get_weight( std::size_t agent_idx, std::size_t i_weight );

std::size_t & get_neighbour( std::size_t agent_idx, std::size_t i_neighbour );

void set_weights( std::size_t agent_idx, const std::vector<WeightT> & weights );

void set_neighbours_and_weights(
std::size_t agent_idx, const std::vector<size_t> & buffer_neighbours, const WeightT & weight );

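A minimal, hypothetical usage sketch of the new getters and setters (the function `halve_incoming_weights` and its use case are invented for illustration; only the `Seldon::Network` member signatures come from the header above):

```cpp
#include "network.hpp"
#include <cstddef>
#include <vector>

// Illustrative only: fetch an agent's incoming weights, scale them, write them back.
void halve_incoming_weights( Seldon::Network & network, std::size_t agent_idx )
{
    std::vector<Seldon::Network::WeightT> weights;
    network.get_weights( agent_idx, weights ); // fills the buffer with incoming weights

    for( auto & w : weights )
        w *= 0.5;

    network.set_weights( agent_idx, weights ); // writes the modified weights back
}
```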
2 changes: 2 additions & 0 deletions include/network_generation.hpp
@@ -8,5 +8,7 @@ namespace Seldon
{
// Returns a unique pointer to a new network with n_connections per agent
std::unique_ptr<Network> generate_n_connections( size_t n_agents, int n_connections, std::mt19937 & gen );
std::unique_ptr<Network> generate_fully_connected( size_t n_agents, Network::WeightT weight = 0.0 );
std::unique_ptr<Network> generate_fully_connected( size_t n_agents, std::mt19937 & gen );
std::unique_ptr<Network> generate_from_file( const std::string & file );
} // namespace Seldon
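A short sketch of how the two new `generate_fully_connected` overloads might be called (the agent count, weight, and seed are made up; the second overload presumably draws the edge weights using the supplied engine):

```cpp
#include "network_generation.hpp"
#include <random>

int main()
{
    // Fully connected network with the same weight on every edge
    auto net_uniform = Seldon::generate_fully_connected( 100, 0.01 );

    // Fully connected network whose weights are generated with the RNG
    std::mt19937 gen( 12345 ); // seed is illustrative
    auto net_random = Seldon::generate_fully_connected( 100, gen );
}
```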
1 change: 1 addition & 0 deletions include/simulation.hpp
@@ -17,6 +17,7 @@ class Simulation
// Write out the agents/network every n iterations, nullopt means never
std::optional<size_t> n_output_agents = 1;
std::optional<size_t> n_output_network = std::nullopt;
bool print_progress = true; // Print the iteration time, by default always prints
};

private:
11 changes: 10 additions & 1 deletion include/util/io.hpp
@@ -73,7 +73,16 @@ inline void network_to_file( Simulation & simulation, const std::string & file_p
network.get_neighbours( idx_agent, buffer_neighbours );
network.get_weights( idx_agent, buffer_weights );

std::string row = fmt::format( "{:>5}, {:>5}, ", idx_agent, buffer_neighbours.size() );
std::string row = fmt::format( "{:>5}, {:>5}", idx_agent, buffer_neighbours.size() );

if( buffer_neighbours.empty() )
{
row += "\n";
}
else
{
row += ", ";
}

for( const auto & idx_neighbour : buffer_neighbours )
{
6 changes: 6 additions & 0 deletions include/util/math.hpp
@@ -127,6 +127,12 @@ class power_law_distribution
( 1.0 - std::pow( eps, ( 1.0 - gamma ) ) ) * dist( gen ) + std::pow( eps, ( 1.0 - gamma ) ),
( 1.0 / ( 1.0 - gamma ) ) );
}

ScalarT mean()
{
// Expected value of the truncated power-law density ~ a^(-gamma) on [eps, 1]
return ( 1.0 - gamma ) / ( 2.0 - gamma ) * ( 1.0 - std::pow( eps, 2.0 - gamma ) )
       / ( 1.0 - std::pow( eps, 1.0 - gamma ) );
}
};

} // namespace Seldon
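As a consistency check (editorial, not from the commit), the mean follows from integrating the normalized truncated power-law density that the inverse-transform sampler above draws from:

$$ \langle a \rangle = \int_{\varepsilon}^{1} a \, \frac{(1-\gamma)\, a^{-\gamma}}{1-\varepsilon^{1-\gamma}} \, \mathrm{d}a = \frac{1-\gamma}{2-\gamma} \cdot \frac{1-\varepsilon^{2-\gamma}}{1-\varepsilon^{1-\gamma}}. $$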
3 changes: 2 additions & 1 deletion meson.build
@@ -1,6 +1,6 @@
project('seldon', 'cpp',
version : '0.1',
default_options : ['warning_level=3', 'cpp_std=c++20', ])
default_options : ['warning_level=3', 'cpp_std=c++20', 'optimization=3'])

add_global_arguments('-Wno-unused-local-typedefs', language : 'cpp')

@@ -27,6 +27,7 @@ tests = [
['Test Tarjan', 'test/test_tarjan.cpp'],
['Test DeGroot', 'test/test_deGroot.cpp'],
['Test Network', 'test/test_network.cpp'],
['Test Network Generation', 'test/test_network_generation.cpp'],
['Test Sampling', 'test/test_sampling.cpp'],
]

32 changes: 30 additions & 2 deletions src/main.cpp
@@ -1,8 +1,10 @@
#include "models/DeGroot.hpp"
#include "simulation.hpp"
#include <fmt/chrono.h>
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <argparse/argparse.hpp>
#include <chrono>
#include <filesystem>
#include <string>
#include <util/io.hpp>
@@ -43,17 +45,35 @@ int main( int argc, char * argv[] )
auto simulation = Seldon::Simulation( config_file_path.string(), network_file, agent_file );

// Seldon::IO::network_to_dot_file( *simulation.network, ( output_dir_path / fs::path( "network.dot" ) ).string() );

Seldon::IO::network_to_file( simulation, ( output_dir_path / fs::path( "network_0.txt" ) ).string() );
auto filename = fmt::format( "opinions_{}.txt", 0 );
Seldon::IO::opinions_to_file( simulation, ( output_dir_path / fs::path( filename ) ).string() );

const std::optional<size_t> n_output_agents = simulation.output_settings.n_output_agents;
const std::optional<size_t> n_output_network = simulation.output_settings.n_output_network;

fmt::print( "=================================================================\n" );
fmt::print( "Starting simulation\n" );

typedef std::chrono::milliseconds ms;
auto t_simulation_start = std::chrono::high_resolution_clock::now();
do
{
auto t_iter_start = std::chrono::high_resolution_clock::now();

simulation.model->iteration();

auto t_iter_end = std::chrono::high_resolution_clock::now();
auto iter_time = std::chrono::duration_cast<ms>( t_iter_end - t_iter_start );

// Print the iteration time?
if( simulation.output_settings.print_progress )
{
fmt::print(
"Iteration {} iter_time = {:%Hh %Mm %Ss} \n", simulation.model->n_iterations,
std::chrono::floor<ms>( iter_time ) );
}

// Write out the opinion?
if( n_output_agents.has_value() && ( simulation.model->n_iterations % n_output_agents.value() == 0 ) )
{
Expand All @@ -70,6 +90,14 @@ int main( int argc, char * argv[] )

} while( !simulation.model->finished() );

fmt::print( "Finished after {} iterations.\n", simulation.model->n_iterations );
auto t_simulation_end = std::chrono::high_resolution_clock::now();
auto total_time = std::chrono::duration_cast<ms>( t_simulation_end - t_simulation_start );

fmt::print( "-----------------------------------------------------------------\n" );
fmt::print(
"Finished after {} iterations, total time = {:%Hh %Mm %Ss}\n", simulation.model->n_iterations,
std::chrono::floor<ms>( total_time ) );
fmt::print( "=================================================================\n" );

return 0;
}