Skip to content

Commit

Permalink
mean-field-theory: Changed the weight update to account for agents being activated, and also to account for reciprocity, iff the outgoing edge j->i does not exist
Browse files Browse the repository at this point in the history
  • Loading branch information
amritagos committed Oct 30, 2023
1 parent 6f7cc3e commit 6d46498
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 7 deletions.
6 changes: 3 additions & 3 deletions include/models/ActivityDrivenModel.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
private:
double max_opinion_diff = 0;
Network & network;
std::vector<AgentT> agents_current_copy;
std::vector<std::vector<Network::WeightT>> contact_prob_list; // Probability of choosing i in 1 to m rounds
// Random number generation
std::mt19937 & gen; // reference to simulation Mersenne-Twister engine
std::set<std::pair<size_t, size_t>> reciprocal_edge_buffer{};
Expand All @@ -56,15 +56,15 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
{
// h is the timestep
auto neighbour_buffer = std::vector<size_t>();
auto weight_buffer = std::vector<Network::WeightT>();
auto weight_buffer = std::vector<Network::WeightT>();
size_t j_index = 0;

k_buffer.resize( network.n_agents() );

for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent )
{
network.get_neighbours( idx_agent, neighbour_buffer ); // Get the incoming neighbours
network.get_weights(idx_agent, weight_buffer); // Get incoming weights
network.get_weights( idx_agent, weight_buffer ); // Get incoming weights
k_buffer[idx_agent] = -opinion( idx_agent );
// Loop through neighbouring agents
for( size_t j = 0; j < neighbour_buffer.size(); j++ )
Expand Down
26 changes: 22 additions & 4 deletions src/models/ActivityDrivenModel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
Seldon::ActivityAgentModel::ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen )
: Model<Seldon::ActivityAgentModel::AgentT>( n_agents ),
network( network ),
agents_current_copy( std::vector<AgentT>( n_agents ) ),
contact_prob_list( std::vector<std::vector<Network::WeightT>>() ),
gen( gen )
{
}
Expand Down Expand Up @@ -109,6 +109,7 @@ void Seldon::ActivityAgentModel::update_network_mean()
for( size_t idx_agent = 0; idx_agent < network.n_agents(); idx_agent++ )
{
network.set_weights( idx_agent, weights );
contact_prob_list[idx_agent] = weights; // set to zero
}

auto probability_helper = []( double omega, size_t m )
Expand Down Expand Up @@ -138,18 +139,35 @@ void Seldon::ActivityAgentModel::update_network_mean()
}

// Go through all the neighbours of idx_agent
// Calculate the probability of i contacting j (in 1 to m rounds, assuming
// the agent is activated
for( size_t j = 0; j < network.n_agents(); j++ )
{
double omega = weight_callback( j ) / normalization;
// We have calculated the outgoing weight i->j
// We can calculate the probability of i contacting j ( i->j )
// Update contact prob_list (outgoing)
contact_prob_list[idx_agent][j] = probability_helper( omega, m );
}

// Calculate the actual weights and reciprocity
for( size_t j = 0; j < network.n_agents(); j++ )
{
double prob_contact_ij = contact_prob_list[idx_agent][j];
double prob_contact_ji = contact_prob_list[j][idx_agent];

// Set the incoming agent weight, j-i in weight list
auto & win_ji = network.get_weight( j, idx_agent );
win_ji += probability_helper( omega, m );
win_ji += agents[idx_agent].data.activity * prob_contact_ij;

// Handle the reciprocity for j->i
// Update incoming weight i-j
auto & win_ij = network.get_weight( idx_agent, j );
win_ij += win_ji * reciprocity;
// The probability of reciprocating is
// Prob(j is not activated)*prob(reciprocity) + Prob(j is active but i is not chosen)*prob(reciprocity)
// And prob(reciprocity) is basically the weight * reciprocity
double prob_cond_reciprocity = win_ji * reciprocity;
win_ij += ( 1 - agents[j].data.activity ) * prob_cond_reciprocity
+ ( agents[j].data.activity * ( 1 - prob_contact_ji ) ) * prob_cond_reciprocity;
}
}
}
Expand Down

0 comments on commit 6d46498

Please sign in to comment.