ActivityDrivenModel: Implemented bots, i.e. agents with a fixed opinion and individual activities and m
MSallermann committed Mar 7, 2024
1 parent b5efc5d commit ebe025a
Showing 4 changed files with 72 additions and 7 deletions.
examples/ActivityDrivenMeanField/conf.toml (2 changes: 1 addition & 1 deletion)

@@ -22,5 +22,5 @@ mean_activities = false # Use the mean value of the powerlaw distribution for th
mean_weights = true # Use the meanfield approximation of the network edges

[network]
-number_of_agents = 1000
+number_of_agents = 100
connections_per_agent = 10
include/models/ActivityDrivenModel.hpp (7 changes: 7 additions & 0 deletions)

@@ -100,6 +100,13 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>

double convergence_tol = 1e-12; // TODO: ??

+// bot @TODO: less hacky
+bool bot_present = false;
+size_t n_bots = 0; // The first n_bots agents are bots
+std::vector<int> bot_m = std::vector<int>( 0 );
+std::vector<double> bot_activity = std::vector<double>( 0 );
+std::vector<double> bot_opinion = std::vector<double>( 0 );

ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen );

void get_agents_from_power_law(); // This needs to be called after eps and gamma have been set
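These members turn the first n_bots agents into bots: their opinions are pinned to bot_opinion, while bot_activity and bot_m override the sampled activity and the number of contacted agents. A minimal sketch of wiring them up directly in C++ (hypothetical model instance and values; the TOML path added in src/simulation.cpp below does the equivalent):

// Sketch only: assumes an already-constructed Seldon::ActivityAgentModel `model`.
model.bot_present  = true;
model.n_bots       = 2;                // agents 0 and 1 act as bots
model.bot_opinion  = { -1.0, 1.0 };    // opinions that stay fixed every iteration
model.bot_activity = { 1.0, 1.0 };     // overrides the power-law activity sample
model.bot_m        = { 10, 10 };       // contacts drawn per activation, per bot
model.get_agents_from_power_law();     // call after setting the bot fields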
src/models/ActivityDrivenModel.cpp (42 changes: 36 additions & 6 deletions)

@@ -34,6 +34,15 @@ void Seldon::ActivityAgentModel::get_agents_from_power_law()
agents[i].data.activity = mean_activity;
}
}

+if( bot_present )
+{
+for( size_t bot_idx = 0; bot_idx < n_bots; bot_idx++ )
+{
+agents[bot_idx].data.opinion = bot_opinion[bot_idx];
+agents[bot_idx].data.activity = bot_activity[bot_idx];
+}
+}
}

void Seldon::ActivityAgentModel::update_network_probabilistic()
@@ -51,15 +60,21 @@ void Seldon::ActivityAgentModel::update_network_probabilistic()
{
// Implement the weight for the probability of agent `idx_agent` contacting agent `j`
// Not normalised since this is taken care of by the reservoir sampling
-auto weight_callback = [idx_agent, this]( size_t j )
-{
+auto weight_callback = [idx_agent, this]( size_t j ) {
if( idx_agent == j ) // The agent does not contact itself
return 0.0;
return std::pow(
std::abs( this->agents[idx_agent].data.opinion - this->agents[j].data.opinion ), -this->homophily );
};

-Seldon::reservoir_sampling_A_ExpJ( m, network.n_agents(), weight_callback, contacted_agents, gen );
+int m_temp = this->m;
+
+if( bot_present && idx_agent < n_bots )
+{
+m_temp = bot_m[idx_agent];
+}
+
+Seldon::reservoir_sampling_A_ExpJ( m_temp, network.n_agents(), weight_callback, contacted_agents, gen );

// Fill the outgoing edges into the reciprocal edge buffer
for( const auto & idx_outgoing : contacted_agents )
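Two details in this hunk are worth spelling out. First, the weight callback implements the homophily rule; writing \beta for the homophily member, agent idx_agent contacts agent j with (unnormalised) probability

w_{ij} \propto \left| x_i - x_j \right|^{-\beta}, \qquad w_{ii} = 0.

Second, because the reservoir sampler now receives m_temp, a bot draws bot_m[idx_agent] contacts per activation instead of the global m.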
@@ -111,8 +126,7 @@ void Seldon::ActivityAgentModel::update_network_mean()
contact_prob_list[idx_agent] = weights; // set to zero
}

-auto probability_helper = []( double omega, size_t m )
-{
+auto probability_helper = []( double omega, size_t m ) {
double p = 0;
for( size_t i = 1; i <= m; i++ )
p += ( std::pow( -omega, i + 1 ) + omega ) / ( omega + 1 );
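A quick algebraic check on probability_helper (my derivation, not part of the commit): each summand simplifies as

\frac{(-\omega)^{i+1} + \omega}{\omega + 1} = \frac{\omega \left( 1 - (-\omega)^{i} \right)}{1 + \omega} = \omega - \omega^{2} + \cdots + (-1)^{i-1} \omega^{i},

so for m = 1 the helper returns exactly \omega, the single-round contact probability (here \omega is the normalised weight computed below), and larger m accumulates the additional per-round terms.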
@@ -140,12 +154,19 @@ void Seldon::ActivityAgentModel::update_network_mean()
// Go through all the neighbours of idx_agent
// Calculate the probability of i contacting j (in 1 to m rounds, assuming
// the agent is activated

+int m_temp = m;
+if( bot_present && idx_agent < n_bots )
+{
+m_temp = bot_m[idx_agent];
+}

for( size_t j = 0; j < network.n_agents(); j++ )
{
double omega = weight_callback( j ) / normalization;
// We can calculate the probability of i contacting j ( i->j )
// Update contact prob_list (outgoing)
-contact_prob_list[idx_agent][j] = agents[idx_agent].data.activity * probability_helper( omega, m );
+contact_prob_list[idx_agent][j] = agents[idx_agent].data.activity * probability_helper( omega, m_temp );
}
}

@@ -182,6 +203,7 @@ void Seldon::ActivityAgentModel::update_network_mean()

void Seldon::ActivityAgentModel::update_network()
{

if( !mean_weights )
{
update_network_probabilistic();
@@ -218,4 +240,12 @@ void Seldon::ActivityAgentModel::iteration()
+= ( k1_buffer[idx_agent] + 2 * k2_buffer[idx_agent] + 2 * k3_buffer[idx_agent] + k4_buffer[idx_agent] )
/ 6.0;
}

+if( bot_present )
+{
+for( size_t bot_idx = 0; bot_idx < n_bots; bot_idx++ )
+{
+agents[bot_idx].data.opinion = bot_opinion[bot_idx];
+}
+}
}
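Re-imposing bot_opinion after the Runge-Kutta update is what makes the bot opinions fixed: in effect each bot evolves with \dot{x}_{\mathrm{bot}} = 0 while still appearing in every other agent's coupling terms.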
src/simulation.cpp (28 changes: 28 additions & 0 deletions)

@@ -5,6 +5,7 @@
#include "util/tomlplusplus.hpp"
#include <fmt/format.h>
#include <fmt/ostream.h>
+#include <fmt/ranges.h>
#include <cstddef>
#include <iostream>
#include <optional>
@@ -125,8 +126,35 @@ Seldon::Simulation::Simulation(

model_activityDriven->max_iterations = max_iterations;

+// bot
+model_activityDriven->bot_present = tbl["ActivityDriven"]["bot_present"].value_or<bool>( false );
+
+if( model_activityDriven->bot_present )
+{
+model_activityDriven->n_bots = tbl["ActivityDriven"]["n_bots"].value_or<size_t>( 0 );
+
+fmt::print( "Using {} bots\n", model_activityDriven->n_bots );
+
+auto bot_opinion = tbl["ActivityDriven"]["bot_opinion"];
+auto bot_m = tbl["ActivityDriven"]["bot_m"];
+auto bot_activity = tbl["ActivityDriven"]["bot_activity"];
+
+for( size_t i = 0; i < model_activityDriven->n_bots; i++ )
+{
+model_activityDriven->bot_opinion.push_back( bot_opinion[i].value_or<double>( 0.0 ) );
+model_activityDriven->bot_m.push_back( bot_m[i].value_or<size_t>( 0 ) );
+model_activityDriven->bot_activity.push_back( bot_activity[i].value_or<double>( 0.0 ) );
+}
+
+fmt::print( "Bot opinions {}\n", model_activityDriven->bot_opinion );
+fmt::print( "Bot m {}\n", model_activityDriven->bot_m );
+fmt::print( "Bot activities {}\n", model_activityDriven->bot_activity );
+}

model_activityDriven->get_agents_from_power_law();
model = std::move( model_activityDriven );

+fmt::print( "Finished model setup\n" );
}

if( cli_agent_file.has_value() )
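For reference, a conf.toml fragment exercising the new options could look like this (a sketch assembled from the keys parsed above; the values are illustrative and not part of the commit):

[ActivityDriven]
# ...existing ActivityDriven settings...
bot_present = true
n_bots = 2
bot_opinion = [ -1.0, 1.0 ]  # fixed opinion per bot
bot_m = [ 10, 10 ]           # contacts per activation, per bot
bot_activity = [ 1.0, 1.0 ]  # activity per bot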
